Lines matching refs: device
93 tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf) in tape_medium_state_show()
105 tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf) in tape_first_minor_show()
117 tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) in tape_state_show()
130 tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) in tape_operation_show()
157 tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) in tape_blocksize_show()
186 tape_state_set(struct tape_device *device, enum tape_state newstate) in tape_state_set() argument
190 if (device->tape_state == TS_NOT_OPER) { in tape_state_set()
194 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); in tape_state_set()
196 if (device->tape_state < TS_SIZE && device->tape_state >= 0) in tape_state_set()
197 str = tape_state_verbose[device->tape_state]; in tape_state_set()
207 device->tape_state = newstate; in tape_state_set()
208 wake_up(&device->state_change_wq); in tape_state_set()
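The tape_state_set() lines above record a new device state and then wake everything sleeping on state_change_wq, refusing to change a TS_NOT_OPER device. A minimal userspace sketch of that set-and-wake pattern, with a pthread condition variable standing in for the kernel wait queue; the toy_* names are illustrative and not part of the driver:

#include <pthread.h>

enum toy_state { TS_INIT, TS_UNUSED, TS_IN_USE, TS_NOT_OPER };

struct toy_device {
	enum toy_state state;
	pthread_mutex_t lock;		/* assumed initialized with pthread_mutex_init() */
	pthread_cond_t state_change;	/* stands in for state_change_wq */
};

/* Record the new state and wake everyone waiting for a change. */
void toy_state_set(struct toy_device *dev, enum toy_state newstate)
{
	pthread_mutex_lock(&dev->lock);
	if (dev->state == TS_NOT_OPER) {	/* a dead drive stays dead */
		pthread_mutex_unlock(&dev->lock);
		return;
	}
	dev->state = newstate;
	pthread_cond_broadcast(&dev->state_change);
	pthread_mutex_unlock(&dev->lock);
}

/* Block until the device reaches the wanted state. */
void toy_state_wait(struct toy_device *dev, enum toy_state wanted)
{
	pthread_mutex_lock(&dev->lock);
	while (dev->state != wanted)
		pthread_cond_wait(&dev->state_change, &dev->lock);
	pthread_mutex_unlock(&dev->lock);
}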
212 struct tape_device *device; member
224 struct tape_device *device = p->device; in tape_med_state_work_handler() local
230 "unloaded\n", dev_name(&device->cdev->dev)); in tape_med_state_work_handler()
232 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); in tape_med_state_work_handler()
236 dev_name(&device->cdev->dev)); in tape_med_state_work_handler()
238 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); in tape_med_state_work_handler()
243 tape_put_device(device); in tape_med_state_work_handler()
248 tape_med_state_work(struct tape_device *device, enum tape_medium_state state) in tape_med_state_work() argument
255 p->device = tape_get_device(device); in tape_med_state_work()
262 tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) in tape_med_state_set() argument
266 oldstate = device->medium_state; in tape_med_state_set()
269 device->medium_state = newstate; in tape_med_state_set()
272 device->tape_generic_status |= GMT_DR_OPEN(~0); in tape_med_state_set()
274 tape_med_state_work(device, MS_UNLOADED); in tape_med_state_set()
277 device->tape_generic_status &= ~GMT_DR_OPEN(~0); in tape_med_state_set()
279 tape_med_state_work(device, MS_LOADED); in tape_med_state_set()
284 wake_up(&device->state_change_wq); in tape_med_state_set()
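tape_med_state_set() above acts only on real transitions: it flips the GMT_DR_OPEN bit in tape_generic_status, hands the uevent off to a work item (tape_med_state_work(), which pins the device with tape_get_device() before queueing), and finally wakes waiters. A condensed sketch of the transition logic, with the deferred uevent reduced to a plain callback; the toy_* names and the TOY_DR_OPEN flag are illustrative:

enum toy_medium_state { MS_UNKNOWN, MS_LOADED, MS_UNLOADED };

#define TOY_DR_OPEN 0x01u	/* stands in for GMT_DR_OPEN(~0) */

struct toy_tape {
	enum toy_medium_state medium_state;
	unsigned int generic_status;
	/* stands in for the work item that emits the KOBJ_CHANGE uevent */
	void (*notify)(struct toy_tape *tape, enum toy_medium_state state);
};

void toy_med_state_set(struct toy_tape *tape, enum toy_medium_state newstate)
{
	enum toy_medium_state oldstate = tape->medium_state;

	if (oldstate == newstate)		/* no transition, nothing to report */
		return;
	tape->medium_state = newstate;

	switch (newstate) {
	case MS_UNLOADED:
		tape->generic_status |= TOY_DR_OPEN;
		if (oldstate == MS_LOADED)	/* report only a real unload */
			tape->notify(tape, MS_UNLOADED);
		break;
	case MS_LOADED:
		tape->generic_status &= ~TOY_DR_OPEN;
		if (oldstate == MS_UNLOADED)	/* report only a real load */
			tape->notify(tape, MS_LOADED);
		break;
	default:
		break;
	}
	/* the driver additionally wakes state_change_wq here */
}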
291 __tape_cancel_io(struct tape_device *device, struct tape_request *request) in __tape_cancel_io() argument
302 rc = ccw_device_clear(device->cdev, (long) request); in __tape_cancel_io()
310 schedule_delayed_work(&device->tape_dnr, 0); in __tape_cancel_io()
331 tape_assign_minor(struct tape_device *device) in tape_assign_minor() argument
347 device->first_minor = minor; in tape_assign_minor()
348 list_add_tail(&device->node, &tmp->node); in tape_assign_minor()
355 tape_remove_minor(struct tape_device *device) in tape_remove_minor() argument
358 list_del_init(&device->node); in tape_remove_minor()
359 device->first_minor = -1; in tape_remove_minor()
372 tape_generic_online(struct tape_device *device, in tape_generic_online() argument
377 DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline); in tape_generic_online()
379 if (device->tape_state != TS_INIT) { in tape_generic_online()
380 DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state); in tape_generic_online()
384 init_timer(&device->lb_timeout); in tape_generic_online()
385 device->lb_timeout.function = tape_long_busy_timeout; in tape_generic_online()
388 device->discipline = discipline; in tape_generic_online()
393 rc = discipline->setup_device(device); in tape_generic_online()
396 rc = tape_assign_minor(device); in tape_generic_online()
400 rc = tapechar_setup_device(device); in tape_generic_online()
404 tape_state_set(device, TS_UNUSED); in tape_generic_online()
406 DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id); in tape_generic_online()
411 tape_remove_minor(device); in tape_generic_online()
413 device->discipline->cleanup_device(device); in tape_generic_online()
414 device->discipline = NULL; in tape_generic_online()
421 tape_cleanup_device(struct tape_device *device) in tape_cleanup_device() argument
423 tapechar_cleanup_device(device); in tape_cleanup_device()
424 device->discipline->cleanup_device(device); in tape_cleanup_device()
425 module_put(device->discipline->owner); in tape_cleanup_device()
426 tape_remove_minor(device); in tape_cleanup_device()
427 tape_med_state_set(device, MS_UNKNOWN); in tape_cleanup_device()
445 struct tape_device *device; in tape_generic_pm_suspend() local
447 device = dev_get_drvdata(&cdev->dev); in tape_generic_pm_suspend()
448 if (!device) { in tape_generic_pm_suspend()
453 device->cdev_id, device); in tape_generic_pm_suspend()
455 if (device->medium_state != MS_UNLOADED) { in tape_generic_pm_suspend()
461 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_pm_suspend()
462 switch (device->tape_state) { in tape_generic_pm_suspend()
466 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_pm_suspend()
471 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_pm_suspend()
475 DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id); in tape_generic_pm_suspend()
489 struct tape_device *device; in tape_generic_offline() local
491 device = dev_get_drvdata(&cdev->dev); in tape_generic_offline()
492 if (!device) { in tape_generic_offline()
497 device->cdev_id, device); in tape_generic_offline()
499 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_offline()
500 switch (device->tape_state) { in tape_generic_offline()
503 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_offline()
506 tape_state_set(device, TS_INIT); in tape_generic_offline()
507 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_offline()
508 tape_cleanup_device(device); in tape_generic_offline()
513 device->cdev_id); in tape_generic_offline()
514 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_offline()
518 DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id); in tape_generic_offline()
528 struct tape_device *device; in tape_alloc_device() local
530 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); in tape_alloc_device()
531 if (device == NULL) { in tape_alloc_device()
535 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); in tape_alloc_device()
536 if (device->modeset_byte == NULL) { in tape_alloc_device()
538 kfree(device); in tape_alloc_device()
541 mutex_init(&device->mutex); in tape_alloc_device()
542 INIT_LIST_HEAD(&device->req_queue); in tape_alloc_device()
543 INIT_LIST_HEAD(&device->node); in tape_alloc_device()
544 init_waitqueue_head(&device->state_change_wq); in tape_alloc_device()
545 init_waitqueue_head(&device->wait_queue); in tape_alloc_device()
546 device->tape_state = TS_INIT; in tape_alloc_device()
547 device->medium_state = MS_UNKNOWN; in tape_alloc_device()
548 *device->modeset_byte = 0; in tape_alloc_device()
549 device->first_minor = -1; in tape_alloc_device()
550 atomic_set(&device->ref_count, 1); in tape_alloc_device()
551 INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); in tape_alloc_device()
553 return device; in tape_alloc_device()
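tape_alloc_device() allocates the device structure, then a separate one-byte modeset buffer (DMA-capable in the driver), and frees the structure again if that second allocation fails, before initializing the fields and handing out the initial reference. A plain-C sketch of that allocate-and-unwind shape; calloc/malloc stand in for kzalloc/kmalloc(GFP_KERNEL | GFP_DMA), and the toy_* names are illustrative:

#include <stdlib.h>

struct toy_tape {
	unsigned char *modeset_byte;	/* separate buffer; GFP_DMA in the driver */
	int first_minor;
	int ref_count;			/* the caller gets the initial reference */
	int tape_state;			/* TS_INIT in the driver */
};

struct toy_tape *toy_alloc_device(void)
{
	struct toy_tape *dev = calloc(1, sizeof(*dev));	/* zeroed, like kzalloc */

	if (!dev)
		return NULL;
	dev->modeset_byte = malloc(1);
	if (!dev->modeset_byte) {	/* second allocation failed: unwind the first */
		free(dev);
		return NULL;
	}
	*dev->modeset_byte = 0;
	dev->first_minor = -1;		/* no minor assigned yet */
	dev->ref_count = 1;
	return dev;
}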
561 tape_get_device(struct tape_device *device) in tape_get_device() argument
565 count = atomic_inc_return(&device->ref_count); in tape_get_device()
566 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count); in tape_get_device()
567 return device; in tape_get_device()
577 tape_put_device(struct tape_device *device) in tape_put_device() argument
581 count = atomic_dec_return(&device->ref_count); in tape_put_device()
582 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count); in tape_put_device()
585 kfree(device->modeset_byte); in tape_put_device()
586 kfree(device); in tape_put_device()
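tape_get_device()/tape_put_device() are straightforward reference counting: the allocator returns the object with a count of one, each additional user increments it, and the put that drops the count to zero frees both the modeset byte and the structure. A self-contained sketch with C11 atomics; the toy_* names are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

struct toy_tape {
	atomic_int ref_count;		/* set to 1 by the allocator */
	unsigned char *modeset_byte;
};

struct toy_tape *toy_get_device(struct toy_tape *dev)
{
	atomic_fetch_add(&dev->ref_count, 1);
	return dev;	/* returning dev allows request->device = toy_get_device(dev) */
}

void toy_put_device(struct toy_tape *dev)
{
	/* atomic_fetch_sub() returns the old value; 1 means this was the last reference */
	if (atomic_fetch_sub(&dev->ref_count, 1) == 1) {
		free(dev->modeset_byte);
		free(dev);
	}
}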
596 struct tape_device *device, *tmp; in tape_find_device() local
598 device = ERR_PTR(-ENODEV); in tape_find_device()
602 device = tape_get_device(tmp); in tape_find_device()
607 return device; in tape_find_device()
616 struct tape_device *device; in tape_generic_probe() local
620 device = tape_alloc_device(); in tape_generic_probe()
621 if (IS_ERR(device)) in tape_generic_probe()
627 tape_put_device(device); in tape_generic_probe()
630 dev_set_drvdata(&cdev->dev, device); in tape_generic_probe()
632 device->cdev = cdev; in tape_generic_probe()
634 device->cdev_id = devid_to_int(&dev_id); in tape_generic_probe()
639 __tape_discard_requests(struct tape_device *device) in __tape_discard_requests() argument
644 list_for_each_safe(l, n, &device->req_queue) { in __tape_discard_requests()
651 request->device = NULL; in __tape_discard_requests()
652 tape_put_device(device); in __tape_discard_requests()
668 struct tape_device * device; in tape_generic_remove() local
670 device = dev_get_drvdata(&cdev->dev); in tape_generic_remove()
671 if (!device) { in tape_generic_remove()
674 DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev); in tape_generic_remove()
676 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_remove()
677 switch (device->tape_state) { in tape_generic_remove()
679 tape_state_set(device, TS_NOT_OPER); in tape_generic_remove()
684 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_remove()
690 tape_state_set(device, TS_NOT_OPER); in tape_generic_remove()
691 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_remove()
692 tape_cleanup_device(device); in tape_generic_remove()
701 device->cdev_id); in tape_generic_remove()
703 dev_name(&device->cdev->dev)); in tape_generic_remove()
704 tape_state_set(device, TS_NOT_OPER); in tape_generic_remove()
705 __tape_discard_requests(device); in tape_generic_remove()
706 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_generic_remove()
707 tape_cleanup_device(device); in tape_generic_remove()
710 device = dev_get_drvdata(&cdev->dev); in tape_generic_remove()
711 if (device) { in tape_generic_remove()
714 tape_put_device(device); in tape_generic_remove()
769 if (request->device) in tape_free_request()
770 tape_put_device(request->device); in tape_free_request()
777 __tape_start_io(struct tape_device *device, struct tape_request *request) in __tape_start_io() argument
782 device->cdev, in __tape_start_io()
793 schedule_delayed_work(&device->tape_dnr, 0); in __tape_start_io()
803 __tape_start_next_request(struct tape_device *device) in __tape_start_next_request() argument
809 DBF_LH(6, "__tape_start_next_request(%p)\n", device); in __tape_start_next_request()
814 list_for_each_safe(l, n, &device->req_queue) { in __tape_start_next_request()
838 rc = __tape_cancel_io(device, request); in __tape_start_next_request()
840 rc = __tape_start_io(device, request); in __tape_start_next_request()
861 struct tape_device *device = in tape_delayed_next_request() local
864 DBF_LH(6, "tape_delayed_next_request(%p)\n", device); in tape_delayed_next_request()
865 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_delayed_next_request()
866 __tape_start_next_request(device); in tape_delayed_next_request()
867 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_delayed_next_request()
873 struct tape_device *device; in tape_long_busy_timeout() local
875 device = (struct tape_device *) data; in tape_long_busy_timeout()
876 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_long_busy_timeout()
877 request = list_entry(device->req_queue.next, struct tape_request, list); in tape_long_busy_timeout()
879 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); in tape_long_busy_timeout()
880 __tape_start_next_request(device); in tape_long_busy_timeout()
881 device->lb_timeout.data = 0UL; in tape_long_busy_timeout()
882 tape_put_device(device); in tape_long_busy_timeout()
883 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_long_busy_timeout()
888 struct tape_device * device, in __tape_end_request() argument
892 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); in __tape_end_request()
906 if (!list_empty(&device->req_queue)) in __tape_end_request()
907 __tape_start_next_request(device); in __tape_end_request()
914 tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, in tape_dump_sense_dbf() argument
926 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); in tape_dump_sense_dbf()
940 __tape_start_request(struct tape_device *device, struct tape_request *request) in __tape_start_request() argument
950 if (device->tape_state == TS_INIT) in __tape_start_request()
952 if (device->tape_state == TS_UNUSED) in __tape_start_request()
955 if (device->tape_state == TS_BLKUSE) in __tape_start_request()
957 if (device->tape_state != TS_IN_USE) in __tape_start_request()
962 request->device = tape_get_device(device); in __tape_start_request()
964 if (list_empty(&device->req_queue)) { in __tape_start_request()
966 rc = __tape_start_io(device, request); in __tape_start_request()
971 list_add(&request->list, &device->req_queue); in __tape_start_request()
975 list_add_tail(&request->list, &device->req_queue); in __tape_start_request()
985 tape_do_io_async(struct tape_device *device, struct tape_request *request) in tape_do_io_async() argument
989 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); in tape_do_io_async()
991 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_async()
993 rc = __tape_start_request(device, request); in tape_do_io_async()
994 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_async()
1011 tape_do_io(struct tape_device *device, struct tape_request *request) in tape_do_io() argument
1015 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io()
1018 request->callback_data = &device->wait_queue; in tape_do_io()
1020 rc = __tape_start_request(device, request); in tape_do_io()
1021 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io()
1025 wait_event(device->wait_queue, (request->callback == NULL)); in tape_do_io()
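tape_do_io() builds a synchronous call out of the asynchronous path: it points the request's completion data at device->wait_queue, starts the request under the ccw lock, and then sleeps in wait_event() until the completion handler clears request->callback. A pthread-based sketch of that submit-then-wait shape, with a done flag standing in for the cleared callback pointer; the toy_* names are illustrative:

#include <pthread.h>
#include <stdbool.h>

struct toy_request {
	bool done;			/* set by the completion path */
	int rc;				/* final result of the request */
	pthread_mutex_t lock;		/* assumed initialized */
	pthread_cond_t wait;		/* stands in for device->wait_queue */
};

/* Runs in the asynchronous completion path (the interrupt handler in the driver). */
void toy_complete(struct toy_request *req, int rc)
{
	pthread_mutex_lock(&req->lock);
	req->rc = rc;
	req->done = true;
	pthread_cond_broadcast(&req->wait);
	pthread_mutex_unlock(&req->lock);
}

/* Synchronous wrapper: start the request, then block until toy_complete() has run. */
int toy_do_io(struct toy_request *req, int (*start)(struct toy_request *req))
{
	int rc = start(req);		/* analogous to __tape_start_request() */

	if (rc)
		return rc;		/* never queued, nothing to wait for */
	pthread_mutex_lock(&req->lock);
	while (!req->done)
		pthread_cond_wait(&req->wait, &req->lock);
	rc = req->rc;
	pthread_mutex_unlock(&req->lock);
	return rc;
}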
1043 tape_do_io_interruptible(struct tape_device *device, in tape_do_io_interruptible() argument
1048 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_interruptible()
1051 request->callback_data = &device->wait_queue; in tape_do_io_interruptible()
1052 rc = __tape_start_request(device, request); in tape_do_io_interruptible()
1053 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_interruptible()
1057 rc = wait_event_interruptible(device->wait_queue, in tape_do_io_interruptible()
1064 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_interruptible()
1065 rc = __tape_cancel_io(device, request); in tape_do_io_interruptible()
1066 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_do_io_interruptible()
1071 device->wait_queue, in tape_do_io_interruptible()
1076 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); in tape_do_io_interruptible()
1086 tape_cancel_io(struct tape_device *device, struct tape_request *request) in tape_cancel_io() argument
1090 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_cancel_io()
1091 rc = __tape_cancel_io(device, request); in tape_cancel_io()
1092 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_cancel_io()
1102 struct tape_device *device; in __tape_do_irq() local
1106 device = dev_get_drvdata(&cdev->dev); in __tape_do_irq()
1107 if (device == NULL) { in __tape_do_irq()
1112 DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request); in __tape_do_irq()
1120 device->cdev_id); in __tape_do_irq()
1122 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1126 device->cdev_id, PTR_ERR(irb)); in __tape_do_irq()
1142 device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); in __tape_do_irq()
1144 schedule_delayed_work(&device->tape_dnr, HZ); in __tape_do_irq()
1152 !list_empty(&device->req_queue)) { in __tape_do_irq()
1155 req = list_entry(device->req_queue.next, in __tape_do_irq()
1158 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); in __tape_do_irq()
1159 if (del_timer(&device->lb_timeout)) { in __tape_do_irq()
1160 device->lb_timeout.data = 0UL; in __tape_do_irq()
1161 tape_put_device(device); in __tape_do_irq()
1162 __tape_start_next_request(device); in __tape_do_irq()
1170 device->tape_generic_status |= GMT_ONLINE(~0); in __tape_do_irq()
1172 device->tape_generic_status &= ~GMT_ONLINE(~0); in __tape_do_irq()
1179 tape_dump_sense_dbf(device, request, irb); in __tape_do_irq()
1182 device->tape_generic_status |= GMT_ONLINE(~0); in __tape_do_irq()
1184 if (device->tape_state == TS_NOT_OPER) { in __tape_do_irq()
1194 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1198 rc = device->discipline->irq(device, request, irb); in __tape_do_irq()
1209 device->tape_generic_status |= GMT_ONLINE(~0); in __tape_do_irq()
1210 __tape_end_request(device, request, rc); in __tape_do_irq()
1215 device->lb_timeout.data = in __tape_do_irq()
1216 (unsigned long) tape_get_device(device); in __tape_do_irq()
1217 device->lb_timeout.expires = jiffies + in __tape_do_irq()
1219 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); in __tape_do_irq()
1220 add_timer(&device->lb_timeout); in __tape_do_irq()
1224 rc = __tape_start_io(device, request); in __tape_do_irq()
1226 __tape_end_request(device, request, rc); in __tape_do_irq()
1229 rc = __tape_cancel_io(device, request); in __tape_do_irq()
1231 __tape_end_request(device, request, rc); in __tape_do_irq()
1236 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1238 __tape_end_request(device, request, rc); in __tape_do_irq()
1248 tape_open(struct tape_device *device) in tape_open() argument
1252 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_open()
1253 if (device->tape_state == TS_NOT_OPER) { in tape_open()
1256 } else if (device->tape_state == TS_IN_USE) { in tape_open()
1259 } else if (device->tape_state == TS_BLKUSE) { in tape_open()
1262 } else if (device->discipline != NULL && in tape_open()
1263 !try_module_get(device->discipline->owner)) { in tape_open()
1267 tape_state_set(device, TS_IN_USE); in tape_open()
1270 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_open()
1278 tape_release(struct tape_device *device) in tape_release() argument
1280 spin_lock_irq(get_ccwdev_lock(device->cdev)); in tape_release()
1281 if (device->tape_state == TS_IN_USE) in tape_release()
1282 tape_state_set(device, TS_UNUSED); in tape_release()
1283 module_put(device->discipline->owner); in tape_release()
1284 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in tape_release()
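tape_open()/tape_release() form a single-user gate: under the ccw lock, open rejects a dead (TS_NOT_OPER) or busy (TS_IN_USE/TS_BLKUSE) drive, otherwise pins the discipline module and marks the drive TS_IN_USE; release reverses both steps. A compact sketch of that gate with errno-style return codes; the module reference is only noted in comments, and the toy_* names are illustrative:

#include <errno.h>
#include <pthread.h>

enum toy_state { TS_UNUSED, TS_IN_USE, TS_NOT_OPER };

struct toy_tape {
	enum toy_state state;
	pthread_mutex_t lock;	/* stands in for get_ccwdev_lock(device->cdev) */
};

int toy_open(struct toy_tape *dev)
{
	int rc = 0;

	pthread_mutex_lock(&dev->lock);
	if (dev->state == TS_NOT_OPER)
		rc = -ENODEV;		/* drive is gone */
	else if (dev->state == TS_IN_USE)
		rc = -EBUSY;		/* only one opener at a time */
	else
		dev->state = TS_IN_USE;	/* the driver also try_module_get()s the discipline here */
	pthread_mutex_unlock(&dev->lock);
	return rc;
}

void toy_release(struct toy_tape *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (dev->state == TS_IN_USE)
		dev->state = TS_UNUSED;	/* the driver also drops the module reference */
	pthread_mutex_unlock(&dev->lock);
}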
1292 tape_mtop(struct tape_device *device, int mt_op, int mt_count) in tape_mtop() argument
1303 fn = device->discipline->mtop_array[mt_op]; in tape_mtop()
1312 if ((rc = fn(device, 500)) != 0) in tape_mtop()
1315 rc = fn(device, mt_count); in tape_mtop()
1317 rc = fn(device, mt_count); in tape_mtop()
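tape_mtop() dispatches an MTIOCTOP request through the discipline's mtop_array of function pointers after validating mt_op, and for repeat-style operations it feeds the backend at most 500 counts at a time (the fn(device, 500) calls visible above) before the final fn(device, mt_count). A minimal sketch of table dispatch plus chunking; which operations are chunked is passed in as a flag here, and the toy_* names and TOY_CHUNK value are illustrative:

#include <errno.h>

struct toy_tape;			/* opaque; only passed through to the backend */
typedef int (*toy_mtop_fn)(struct toy_tape *dev, int count);

#define TOY_NR_MTOPS 4
#define TOY_CHUNK    500		/* largest count handed to a backend at once */

int toy_mtop(struct toy_tape *dev, toy_mtop_fn table[TOY_NR_MTOPS],
	     int op, int count, int chunked)
{
	toy_mtop_fn fn;
	int rc;

	if (op < 0 || op >= TOY_NR_MTOPS)
		return -EINVAL;		/* unknown operation */
	fn = table[op];
	if (!fn)
		return -EINVAL;		/* this discipline does not implement it */

	if (chunked)			/* e.g. space-forward/backward style operations */
		for (; count > TOY_CHUNK; count -= TOY_CHUNK)
			if ((rc = fn(dev, TOY_CHUNK)) != 0)
				return rc;
	return fn(dev, count);
}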