Lines matching refs: device (the struct dasd_device pointer, as a local variable or function argument, across the s390 DASD core driver drivers/s390/block/dasd.c; each hit shows the source line number, the matching code line, and the enclosing function)
94 struct dasd_device *device; in dasd_alloc_device() local
96 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); in dasd_alloc_device()
97 if (!device) in dasd_alloc_device()
101 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); in dasd_alloc_device()
102 if (!device->ccw_mem) { in dasd_alloc_device()
103 kfree(device); in dasd_alloc_device()
107 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); in dasd_alloc_device()
108 if (!device->erp_mem) { in dasd_alloc_device()
109 free_pages((unsigned long) device->ccw_mem, 1); in dasd_alloc_device()
110 kfree(device); in dasd_alloc_device()
114 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); in dasd_alloc_device()
115 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); in dasd_alloc_device()
116 spin_lock_init(&device->mem_lock); in dasd_alloc_device()
117 atomic_set(&device->tasklet_scheduled, 0); in dasd_alloc_device()
118 tasklet_init(&device->tasklet, in dasd_alloc_device()
120 (unsigned long) device); in dasd_alloc_device()
121 INIT_LIST_HEAD(&device->ccw_queue); in dasd_alloc_device()
122 init_timer(&device->timer); in dasd_alloc_device()
123 device->timer.function = dasd_device_timeout; in dasd_alloc_device()
124 device->timer.data = (unsigned long) device; in dasd_alloc_device()
125 INIT_WORK(&device->kick_work, do_kick_device); in dasd_alloc_device()
126 INIT_WORK(&device->restore_device, do_restore_device); in dasd_alloc_device()
127 INIT_WORK(&device->reload_device, do_reload_device); in dasd_alloc_device()
128 INIT_WORK(&device->requeue_requests, do_requeue_requests); in dasd_alloc_device()
129 device->state = DASD_STATE_NEW; in dasd_alloc_device()
130 device->target = DASD_STATE_NEW; in dasd_alloc_device()
131 mutex_init(&device->state_mutex); in dasd_alloc_device()
132 spin_lock_init(&device->profile.lock); in dasd_alloc_device()
133 return device; in dasd_alloc_device()
139 void dasd_free_device(struct dasd_device *device) in dasd_free_device() argument
141 kfree(device->private); in dasd_free_device()
142 free_page((unsigned long) device->erp_mem); in dasd_free_device()
143 free_pages((unsigned long) device->ccw_mem, 1); in dasd_free_device()
144 kfree(device); in dasd_free_device()
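The hits from dasd_alloc_device() and dasd_free_device() above form a pair: a zeroed device structure, two DMA pages for the CCW chunk pool, and one DMA page for the ERP chunk pool are allocated in that order, and every failure path (and dasd_free_device() itself) releases them in reverse order. A condensed sketch of that pattern, assuming the usual ERR_PTR(-ENOMEM) convention for the failure return, which is not visible in the truncated hits:

struct dasd_device *dasd_alloc_device_sketch(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);
        /* two contiguous DMA pages backing the ccw chunk pool */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem)
                goto out_free_device;
        /* one DMA page backing the ERP chunk pool */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem)
                goto out_free_ccw;
        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE * 2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        return device;

out_free_ccw:
        free_pages((unsigned long) device->ccw_mem, 1);
out_free_device:
        kfree(device);
        return ERR_PTR(-ENOMEM);
}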
187 static int dasd_state_new_to_known(struct dasd_device *device) in dasd_state_new_to_known() argument
195 dasd_get_device(device); in dasd_state_new_to_known()
197 if (device->block) { in dasd_state_new_to_known()
198 rc = dasd_alloc_queue(device->block); in dasd_state_new_to_known()
200 dasd_put_device(device); in dasd_state_new_to_known()
204 device->state = DASD_STATE_KNOWN; in dasd_state_new_to_known()
211 static int dasd_state_known_to_new(struct dasd_device *device) in dasd_state_known_to_new() argument
214 dasd_eer_disable(device); in dasd_state_known_to_new()
215 device->state = DASD_STATE_NEW; in dasd_state_known_to_new()
217 if (device->block) in dasd_state_known_to_new()
218 dasd_free_queue(device->block); in dasd_state_known_to_new()
221 dasd_put_device(device); in dasd_state_known_to_new()
241 static int dasd_state_known_to_basic(struct dasd_device *device) in dasd_state_known_to_basic() argument
243 struct dasd_block *block = device->block; in dasd_state_known_to_basic()
256 dasd_profile_on(&device->block->profile); in dasd_state_known_to_basic()
258 device->debugfs_dentry = in dasd_state_known_to_basic()
259 dasd_debugfs_setup(dev_name(&device->cdev->dev), in dasd_state_known_to_basic()
261 dasd_profile_init(&device->profile, device->debugfs_dentry); in dasd_state_known_to_basic()
262 dasd_hosts_init(device->debugfs_dentry, device); in dasd_state_known_to_basic()
265 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, in dasd_state_known_to_basic()
267 debug_register_view(device->debug_area, &debug_sprintf_view); in dasd_state_known_to_basic()
268 debug_set_level(device->debug_area, DBF_WARNING); in dasd_state_known_to_basic()
269 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); in dasd_state_known_to_basic()
271 device->state = DASD_STATE_BASIC; in dasd_state_known_to_basic()
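dasd_state_known_to_basic() also registers the per-device s390 debug feature area seen in the hits above; dasd_state_basic_to_known() unregisters it again. A minimal sketch of that setup/teardown, with the buffer-size argument of debug_register() shown as an assumption because the hit is truncated:

static void dasd_debug_area_setup_sketch(struct dasd_device *device)
{
        /* one debug area per device, named after the underlying ccw device;
         * the last argument (buffer size) is an assumption, see lead-in */
        device->debug_area = debug_register(dev_name(&device->cdev->dev),
                                            4, 1, 8 * sizeof(long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_WARNING);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
}

static void dasd_debug_area_teardown_sketch(struct dasd_device *device)
{
        if (device->debug_area) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
}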
279 static int dasd_state_basic_to_known(struct dasd_device *device) in dasd_state_basic_to_known() argument
283 if (device->discipline->basic_to_known) { in dasd_state_basic_to_known()
284 rc = device->discipline->basic_to_known(device); in dasd_state_basic_to_known()
289 if (device->block) { in dasd_state_basic_to_known()
290 dasd_profile_exit(&device->block->profile); in dasd_state_basic_to_known()
291 debugfs_remove(device->block->debugfs_dentry); in dasd_state_basic_to_known()
292 dasd_gendisk_free(device->block); in dasd_state_basic_to_known()
293 dasd_block_clear_timer(device->block); in dasd_state_basic_to_known()
295 rc = dasd_flush_device_queue(device); in dasd_state_basic_to_known()
298 dasd_device_clear_timer(device); in dasd_state_basic_to_known()
299 dasd_profile_exit(&device->profile); in dasd_state_basic_to_known()
300 dasd_hosts_exit(device); in dasd_state_basic_to_known()
301 debugfs_remove(device->debugfs_dentry); in dasd_state_basic_to_known()
302 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); in dasd_state_basic_to_known()
303 if (device->debug_area != NULL) { in dasd_state_basic_to_known()
304 debug_unregister(device->debug_area); in dasd_state_basic_to_known()
305 device->debug_area = NULL; in dasd_state_basic_to_known()
307 device->state = DASD_STATE_KNOWN; in dasd_state_basic_to_known()
325 static int dasd_state_basic_to_ready(struct dasd_device *device) in dasd_state_basic_to_ready() argument
332 block = device->block; in dasd_state_basic_to_ready()
339 device->state = DASD_STATE_UNFMT; in dasd_state_basic_to_ready()
340 disk = device->block->gdp; in dasd_state_basic_to_ready()
350 device->state = DASD_STATE_READY; in dasd_state_basic_to_ready()
353 device->state = DASD_STATE_BASIC; in dasd_state_basic_to_ready()
357 device->state = DASD_STATE_READY; in dasd_state_basic_to_ready()
360 if (device->discipline->basic_to_ready) in dasd_state_basic_to_ready()
361 rc = device->discipline->basic_to_ready(device); in dasd_state_basic_to_ready()
366 int _wait_for_empty_queues(struct dasd_device *device) in _wait_for_empty_queues() argument
368 if (device->block) in _wait_for_empty_queues()
369 return list_empty(&device->ccw_queue) && in _wait_for_empty_queues()
370 list_empty(&device->block->ccw_queue); in _wait_for_empty_queues()
372 return list_empty(&device->ccw_queue); in _wait_for_empty_queues()
380 static int dasd_state_ready_to_basic(struct dasd_device *device) in dasd_state_ready_to_basic() argument
384 device->state = DASD_STATE_BASIC; in dasd_state_ready_to_basic()
385 if (device->block) { in dasd_state_ready_to_basic()
386 struct dasd_block *block = device->block; in dasd_state_ready_to_basic()
389 device->state = DASD_STATE_READY; in dasd_state_ready_to_basic()
403 static int dasd_state_unfmt_to_basic(struct dasd_device *device) in dasd_state_unfmt_to_basic() argument
405 device->state = DASD_STATE_BASIC; in dasd_state_unfmt_to_basic()
415 dasd_state_ready_to_online(struct dasd_device * device) in dasd_state_ready_to_online() argument
421 device->state = DASD_STATE_ONLINE; in dasd_state_ready_to_online()
422 if (device->block) { in dasd_state_ready_to_online()
423 dasd_schedule_block_bh(device->block); in dasd_state_ready_to_online()
424 if ((device->features & DASD_FEATURE_USERAW)) { in dasd_state_ready_to_online()
425 disk = device->block->gdp; in dasd_state_ready_to_online()
429 disk = device->block->bdev->bd_disk; in dasd_state_ready_to_online()
441 static int dasd_state_online_to_ready(struct dasd_device *device) in dasd_state_online_to_ready() argument
448 if (device->discipline->online_to_ready) { in dasd_state_online_to_ready()
449 rc = device->discipline->online_to_ready(device); in dasd_state_online_to_ready()
454 device->state = DASD_STATE_READY; in dasd_state_online_to_ready()
455 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { in dasd_state_online_to_ready()
456 disk = device->block->bdev->bd_disk; in dasd_state_online_to_ready()
468 static int dasd_increase_state(struct dasd_device *device) in dasd_increase_state() argument
473 if (device->state == DASD_STATE_NEW && in dasd_increase_state()
474 device->target >= DASD_STATE_KNOWN) in dasd_increase_state()
475 rc = dasd_state_new_to_known(device); in dasd_increase_state()
478 device->state == DASD_STATE_KNOWN && in dasd_increase_state()
479 device->target >= DASD_STATE_BASIC) in dasd_increase_state()
480 rc = dasd_state_known_to_basic(device); in dasd_increase_state()
483 device->state == DASD_STATE_BASIC && in dasd_increase_state()
484 device->target >= DASD_STATE_READY) in dasd_increase_state()
485 rc = dasd_state_basic_to_ready(device); in dasd_increase_state()
488 device->state == DASD_STATE_UNFMT && in dasd_increase_state()
489 device->target > DASD_STATE_UNFMT) in dasd_increase_state()
493 device->state == DASD_STATE_READY && in dasd_increase_state()
494 device->target >= DASD_STATE_ONLINE) in dasd_increase_state()
495 rc = dasd_state_ready_to_online(device); in dasd_increase_state()
503 static int dasd_decrease_state(struct dasd_device *device) in dasd_decrease_state() argument
508 if (device->state == DASD_STATE_ONLINE && in dasd_decrease_state()
509 device->target <= DASD_STATE_READY) in dasd_decrease_state()
510 rc = dasd_state_online_to_ready(device); in dasd_decrease_state()
513 device->state == DASD_STATE_READY && in dasd_decrease_state()
514 device->target <= DASD_STATE_BASIC) in dasd_decrease_state()
515 rc = dasd_state_ready_to_basic(device); in dasd_decrease_state()
518 device->state == DASD_STATE_UNFMT && in dasd_decrease_state()
519 device->target <= DASD_STATE_BASIC) in dasd_decrease_state()
520 rc = dasd_state_unfmt_to_basic(device); in dasd_decrease_state()
523 device->state == DASD_STATE_BASIC && in dasd_decrease_state()
524 device->target <= DASD_STATE_KNOWN) in dasd_decrease_state()
525 rc = dasd_state_basic_to_known(device); in dasd_decrease_state()
528 device->state == DASD_STATE_KNOWN && in dasd_decrease_state()
529 device->target <= DASD_STATE_NEW) in dasd_decrease_state()
530 rc = dasd_state_known_to_new(device); in dasd_decrease_state()
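dasd_increase_state() and dasd_decrease_state() move the device exactly one state at a time toward device->target; dasd_change_state() picks the direction, and because each helper falls through to the next conditional, a single call walks the whole chain of transitions until the target is reached or a step fails (in which case the target is reset to the current state). A condensed sketch of the upward ladder; the if (!rc && ...) chaining is inferred from the truncated hits, and the DASD_STATE_UNFMT special case is omitted:

static int dasd_increase_state_sketch(struct dasd_device *device)
{
        int rc = 0;

        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);
        if (!rc && device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);
        if (!rc && device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);
        if (!rc && device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);
        return rc;
}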
538 static void dasd_change_state(struct dasd_device *device) in dasd_change_state() argument
542 if (device->state == device->target) in dasd_change_state()
545 if (device->state < device->target) in dasd_change_state()
546 rc = dasd_increase_state(device); in dasd_change_state()
548 rc = dasd_decrease_state(device); in dasd_change_state()
552 device->target = device->state; in dasd_change_state()
555 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); in dasd_change_state()
557 if (device->state == device->target) in dasd_change_state()
569 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); in do_kick_device() local
570 mutex_lock(&device->state_mutex); in do_kick_device()
571 dasd_change_state(device); in do_kick_device()
572 mutex_unlock(&device->state_mutex); in do_kick_device()
573 dasd_schedule_device_bh(device); in do_kick_device()
574 dasd_put_device(device); in do_kick_device()
577 void dasd_kick_device(struct dasd_device *device) in dasd_kick_device() argument
579 dasd_get_device(device); in dasd_kick_device()
581 if (!schedule_work(&device->kick_work)) in dasd_kick_device()
582 dasd_put_device(device); in dasd_kick_device()
592 struct dasd_device *device = container_of(work, struct dasd_device, in do_reload_device() local
594 device->discipline->reload(device); in do_reload_device()
595 dasd_put_device(device); in do_reload_device()
598 void dasd_reload_device(struct dasd_device *device) in dasd_reload_device() argument
600 dasd_get_device(device); in dasd_reload_device()
602 if (!schedule_work(&device->reload_device)) in dasd_reload_device()
603 dasd_put_device(device); in dasd_reload_device()
613 struct dasd_device *device = container_of(work, struct dasd_device, in do_restore_device() local
615 device->cdev->drv->restore(device->cdev); in do_restore_device()
616 dasd_put_device(device); in do_restore_device()
619 void dasd_restore_device(struct dasd_device *device) in dasd_restore_device() argument
621 dasd_get_device(device); in dasd_restore_device()
623 if (!schedule_work(&device->restore_device)) in dasd_restore_device()
624 dasd_put_device(device); in dasd_restore_device()
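do_kick_device(), do_reload_device(), do_restore_device() and their dasd_*_device() triggers above all follow the same reference-counting pattern: take a device reference before queueing the work item, drop it immediately if schedule_work() reports the item was already pending, and let the single worker execution release the remaining reference. Sketch of the trigger side:

void dasd_kick_device_sketch(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue for the generic kernel workqueue */
        if (!schedule_work(&device->kick_work))
                dasd_put_device(device);        /* already pending: drop the extra ref */
}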
630 void dasd_set_target_state(struct dasd_device *device, int target) in dasd_set_target_state() argument
632 dasd_get_device(device); in dasd_set_target_state()
633 mutex_lock(&device->state_mutex); in dasd_set_target_state()
637 if (device->target != target) { in dasd_set_target_state()
638 if (device->state == target) in dasd_set_target_state()
640 device->target = target; in dasd_set_target_state()
642 if (device->state != device->target) in dasd_set_target_state()
643 dasd_change_state(device); in dasd_set_target_state()
644 mutex_unlock(&device->state_mutex); in dasd_set_target_state()
645 dasd_put_device(device); in dasd_set_target_state()
652 static inline int _wait_for_device(struct dasd_device *device) in _wait_for_device() argument
654 return (device->state == device->target); in _wait_for_device()
657 void dasd_enable_device(struct dasd_device *device) in dasd_enable_device() argument
659 dasd_set_target_state(device, DASD_STATE_ONLINE); in dasd_enable_device()
660 if (device->state <= DASD_STATE_KNOWN) in dasd_enable_device()
662 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_enable_device()
664 wait_event(dasd_init_waitq, _wait_for_device(device)); in dasd_enable_device()
666 dasd_reload_device(device); in dasd_enable_device()
667 if (device->discipline->kick_validate) in dasd_enable_device()
668 device->discipline->kick_validate(device); in dasd_enable_device()
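dasd_set_target_state() only records the wanted state under state_mutex and runs dasd_change_state(); callers that need the transition to complete, such as dasd_enable_device() above, block on dasd_init_waitq until state has caught up with target (the wake-up itself is not visible in the listing but follows the state == target check in dasd_change_state()). Caller-side sketch:

static void bring_device_online_sketch(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* no discipline could be attached: fall back to NEW */
                dasd_set_target_state(device, DASD_STATE_NEW);
        wait_event(dasd_init_waitq, _wait_for_device(device));
}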
693 struct dasd_device *device; in dasd_profile_start() local
723 device = cqr->startdev; in dasd_profile_start()
724 if (device->profile.data) { in dasd_profile_start()
726 list_for_each(l, &device->ccw_queue) in dasd_profile_start()
730 spin_lock(&device->profile.lock); in dasd_profile_start()
731 if (device->profile.data) { in dasd_profile_start()
732 device->profile.data->dasd_io_nr_req[counter]++; in dasd_profile_start()
734 device->profile.data->dasd_read_nr_req[counter]++; in dasd_profile_start()
736 spin_unlock(&device->profile.lock); in dasd_profile_start()
803 struct dasd_device *device; in dasd_profile_end() local
808 device = cqr->startdev; in dasd_profile_end()
811 device->profile.data)) in dasd_profile_end()
870 spin_lock(&device->profile.lock); in dasd_profile_end()
871 if (device->profile.data) { in dasd_profile_end()
872 data = device->profile.data; in dasd_profile_end()
877 dasd_profile_end_add_data(device->profile.data, in dasd_profile_end()
886 spin_unlock(&device->profile.lock); in dasd_profile_end()
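dasd_profile_start() and dasd_profile_end() test device->profile.data once without the lock (as a cheap filter and to size the queue-depth bucket) and then re-test it under device->profile.lock before updating any counters, because profiling can be switched off concurrently. Condensed sketch of the locked update; the bucket computation and request-direction check are simplified:

static void dasd_profile_count_sketch(struct dasd_device *device,
                                      int counter, bool is_read)
{
        spin_lock(&device->profile.lock);
        if (device->profile.data) {     /* may have been freed since the unlocked check */
                device->profile.data->dasd_io_nr_req[counter]++;
                if (is_read)
                        device->profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&device->profile.lock);
}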
1178 struct dasd_device *device; in dasd_hosts_show() local
1181 device = m->private; in dasd_hosts_show()
1182 dasd_get_device(device); in dasd_hosts_show()
1184 if (device->discipline->hosts_print) in dasd_hosts_show()
1185 rc = device->discipline->hosts_print(device, m); in dasd_hosts_show()
1187 dasd_put_device(device); in dasd_hosts_show()
1193 struct dasd_device *device = inode->i_private; in dasd_hosts_open() local
1195 return single_open(file, dasd_hosts_show, device); in dasd_hosts_open()
1206 static void dasd_hosts_exit(struct dasd_device *device) in dasd_hosts_exit() argument
1208 debugfs_remove(device->hosts_dentry); in dasd_hosts_exit()
1209 device->hosts_dentry = NULL; in dasd_hosts_exit()
1213 struct dasd_device *device) in dasd_hosts_init() argument
1223 device, &dasd_hosts_fops); in dasd_hosts_init()
1225 device->hosts_dentry = pde; in dasd_hosts_init()
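The host-access debugfs file above uses the standard seq_file single_open() pattern: the open routine stores the device as the seq_file's private data, and the show routine pins the device for the duration of the discipline callback. Sketch based on the hits; the -EOPNOTSUPP default and the dasd_hosts_fops definition are assumptions, since neither is fully visible in the listing:

static int dasd_hosts_show_sketch(struct seq_file *m, void *v)
{
        struct dasd_device *device = m->private;
        int rc = -EOPNOTSUPP;   /* assumed default when the discipline has no hosts_print */

        dasd_get_device(device);
        if (device->discipline->hosts_print)
                rc = device->discipline->hosts_print(device, m);
        dasd_put_device(device);
        return rc;
}

static int dasd_hosts_open_sketch(struct inode *inode, struct file *file)
{
        struct dasd_device *device = inode->i_private;

        return single_open(file, dasd_hosts_show_sketch, device);
}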
1237 struct dasd_device *device) in dasd_kmalloc_request() argument
1268 dasd_get_device(device); in dasd_kmalloc_request()
1275 struct dasd_device *device) in dasd_smalloc_request() argument
1287 spin_lock_irqsave(&device->mem_lock, flags); in dasd_smalloc_request()
1289 dasd_alloc_chunk(&device->ccw_chunks, size); in dasd_smalloc_request()
1290 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_smalloc_request()
1308 dasd_get_device(device); in dasd_smalloc_request()
1318 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_kfree_request() argument
1330 dasd_put_device(device); in dasd_kfree_request()
1334 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_sfree_request() argument
1338 spin_lock_irqsave(&device->mem_lock, flags); in dasd_sfree_request()
1339 dasd_free_chunk(&device->ccw_chunks, cqr); in dasd_sfree_request()
1340 spin_unlock_irqrestore(&device->mem_lock, flags); in dasd_sfree_request()
1341 dasd_put_device(device); in dasd_sfree_request()
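dasd_smalloc_request() and dasd_sfree_request() carve CCW requests out of the preallocated DMA chunk pools set up in dasd_alloc_device(), serialize the pool with device->mem_lock, and make every outstanding request hold a device reference. Condensed sketch of the pool discipline; the size calculation and the CCW/data layout inside the chunk are omitted:

static void *dasd_chunk_get_sketch(struct dasd_device *device, int size)
{
        unsigned long flags;
        void *mem;

        spin_lock_irqsave(&device->mem_lock, flags);
        mem = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (mem)
                dasd_get_device(device);        /* request pins the device */
        return mem;
}

static void dasd_chunk_put_sketch(struct dasd_device *device, void *mem)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, mem);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}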
1350 struct dasd_device *device; in dasd_check_cqr() local
1354 device = cqr->startdev; in dasd_check_cqr()
1355 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { in dasd_check_cqr()
1356 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_check_cqr()
1360 *(unsigned int *) device->discipline->name); in dasd_check_cqr()
1374 struct dasd_device *device; in dasd_term_IO() local
1383 device = (struct dasd_device *) cqr->startdev; in dasd_term_IO()
1385 rc = ccw_device_clear(device->cdev, (long) cqr); in dasd_term_IO()
1391 DBF_DEV_EVENT(DBF_DEBUG, device, in dasd_term_IO()
1396 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1400 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1413 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1419 DBF_DEV_EVENT(DBF_ERR, device, "%s", in dasd_term_IO()
1425 dev_err(&device->cdev->dev, "An error occurred in the " in dasd_term_IO()
1432 dasd_schedule_device_bh(device); in dasd_term_IO()
1443 struct dasd_device *device; in dasd_start_IO() local
1453 device = (struct dasd_device *) cqr->startdev; in dasd_start_IO()
1456 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && in dasd_start_IO()
1458 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " in dasd_start_IO()
1467 dev_err(&device->cdev->dev, "An error occurred in the DASD " in dasd_start_IO()
1476 cqr->lpm &= dasd_path_get_opm(device); in dasd_start_IO()
1478 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1481 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1484 rc = ccw_device_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1492 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1496 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1508 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_start_IO()
1511 } else if (cqr->lpm != dasd_path_get_opm(device)) { in dasd_start_IO()
1512 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1513 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", in dasd_start_IO()
1517 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1520 dasd_generic_last_path_gone(device); in dasd_start_IO()
1521 dasd_path_no_path(device); in dasd_start_IO()
1522 dasd_path_set_tbvpm(device, in dasd_start_IO()
1524 device->cdev)); in dasd_start_IO()
1528 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1532 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1537 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_start_IO()
1544 dev_err(&device->cdev->dev, in dasd_start_IO()
1566 struct dasd_device *device; in dasd_device_timeout() local
1568 device = (struct dasd_device *) ptr; in dasd_device_timeout()
1569 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_device_timeout()
1571 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); in dasd_device_timeout()
1572 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_device_timeout()
1573 dasd_schedule_device_bh(device); in dasd_device_timeout()
1579 void dasd_device_set_timer(struct dasd_device *device, int expires) in dasd_device_set_timer() argument
1582 del_timer(&device->timer); in dasd_device_set_timer()
1584 mod_timer(&device->timer, jiffies + expires); in dasd_device_set_timer()
1591 void dasd_device_clear_timer(struct dasd_device *device) in dasd_device_clear_timer() argument
1593 del_timer(&device->timer); in dasd_device_clear_timer()
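The per-device timer wired up in dasd_alloc_device() (init_timer(), timer.function, timer.data) uses the pre-timer_setup() API, so dasd_device_timeout() recovers the device by casting its unsigned long argument back. The set/clear helpers above reduce to del_timer()/mod_timer(); the if/else around them is inferred from the hits:

void dasd_device_set_timer_sketch(struct dasd_device *device, int expires)
{
        if (expires == 0)
                del_timer(&device->timer);      /* expires == 0 means "cancel" */
        else
                mod_timer(&device->timer, jiffies + expires);
}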
1601 struct dasd_device *device; in dasd_handle_killed_request() local
1613 device = dasd_device_from_cdev_locked(cdev); in dasd_handle_killed_request()
1614 if (IS_ERR(device)) { in dasd_handle_killed_request()
1621 device != cqr->startdev || in dasd_handle_killed_request()
1626 dasd_put_device(device); in dasd_handle_killed_request()
1633 dasd_device_clear_timer(device); in dasd_handle_killed_request()
1634 dasd_schedule_device_bh(device); in dasd_handle_killed_request()
1635 dasd_put_device(device); in dasd_handle_killed_request()
1638 void dasd_generic_handle_state_change(struct dasd_device *device) in dasd_generic_handle_state_change() argument
1641 dasd_eer_snss(device); in dasd_generic_handle_state_change()
1643 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); in dasd_generic_handle_state_change()
1644 dasd_schedule_device_bh(device); in dasd_generic_handle_state_change()
1645 if (device->block) { in dasd_generic_handle_state_change()
1646 dasd_schedule_block_bh(device->block); in dasd_generic_handle_state_change()
1647 if (device->block->request_queue) in dasd_generic_handle_state_change()
1648 blk_mq_run_hw_queues(device->block->request_queue, in dasd_generic_handle_state_change()
1668 struct dasd_device *device; in dasd_int_handler() local
1680 device = cqr->startdev; in dasd_int_handler()
1682 dasd_device_clear_timer(device); in dasd_int_handler()
1684 dasd_schedule_device_bh(device); in dasd_int_handler()
1708 device = dasd_device_from_cdev_locked(cdev); in dasd_int_handler()
1709 if (IS_ERR(device)) in dasd_int_handler()
1712 if (device->discipline == dasd_diag_discipline_pointer) { in dasd_int_handler()
1713 dasd_put_device(device); in dasd_int_handler()
1731 device->discipline->dump_sense_dbf(device, irb, "int"); in dasd_int_handler()
1733 if (device->features & DASD_FEATURE_ERPLOG) in dasd_int_handler()
1734 device->discipline->dump_sense(device, cqr, irb); in dasd_int_handler()
1735 device->discipline->check_for_device_change(device, cqr, irb); in dasd_int_handler()
1736 dasd_put_device(device); in dasd_int_handler()
1741 device = dasd_device_from_cdev_locked(cdev); in dasd_int_handler()
1742 if (!IS_ERR(device)) { in dasd_int_handler()
1743 device->discipline->check_attention(device, in dasd_int_handler()
1745 dasd_put_device(device); in dasd_int_handler()
1752 device = (struct dasd_device *) cqr->startdev; in dasd_int_handler()
1753 if (!device || in dasd_int_handler()
1754 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { in dasd_int_handler()
1764 dasd_device_clear_timer(device); in dasd_int_handler()
1766 dasd_schedule_device_bh(device); in dasd_int_handler()
1772 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " in dasd_int_handler()
1785 if (cqr->devlist.next != &device->ccw_queue) { in dasd_int_handler()
1795 device->discipline->handle_hpf_error) in dasd_int_handler()
1796 device->discipline->handle_hpf_error(device, irb); in dasd_int_handler()
1803 if (cqr->lpm == dasd_path_get_opm(device)) in dasd_int_handler()
1804 DBF_DEV_EVENT(DBF_DEBUG, device, in dasd_int_handler()
1809 cqr->lpm = dasd_path_get_opm(device); in dasd_int_handler()
1816 (!device->stopped)) { in dasd_int_handler()
1817 if (device->discipline->start_IO(next) == 0) in dasd_int_handler()
1821 dasd_device_set_timer(device, expires); in dasd_int_handler()
1823 dasd_device_clear_timer(device); in dasd_int_handler()
1824 dasd_schedule_device_bh(device); in dasd_int_handler()
1830 struct dasd_device *device; in dasd_generic_uc_handler() local
1832 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_uc_handler()
1834 if (IS_ERR(device)) in dasd_generic_uc_handler()
1836 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || in dasd_generic_uc_handler()
1837 device->state != device->target || in dasd_generic_uc_handler()
1838 !device->discipline->check_for_device_change){ in dasd_generic_uc_handler()
1839 dasd_put_device(device); in dasd_generic_uc_handler()
1842 if (device->discipline->dump_sense_dbf) in dasd_generic_uc_handler()
1843 device->discipline->dump_sense_dbf(device, irb, "uc"); in dasd_generic_uc_handler()
1844 device->discipline->check_for_device_change(device, NULL, irb); in dasd_generic_uc_handler()
1845 dasd_put_device(device); in dasd_generic_uc_handler()
1855 static void __dasd_device_recovery(struct dasd_device *device, in __dasd_device_recovery() argument
1867 list_for_each_safe(l, n, &device->ccw_queue) { in __dasd_device_recovery()
1880 static void __dasd_device_process_ccw_queue(struct dasd_device *device, in __dasd_device_process_ccw_queue() argument
1887 list_for_each_safe(l, n, &device->ccw_queue) { in __dasd_device_process_ccw_queue()
1896 __dasd_device_recovery(device, cqr); in __dasd_device_process_ccw_queue()
1907 static void __dasd_device_process_final_queue(struct dasd_device *device, in __dasd_device_process_final_queue() argument
1938 dev_err(&device->cdev->dev, in __dasd_device_process_final_queue()
1954 static void __dasd_device_check_expire(struct dasd_device *device) in __dasd_device_check_expire() argument
1958 if (list_empty(&device->ccw_queue)) in __dasd_device_check_expire()
1960 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_check_expire()
1963 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in __dasd_device_check_expire()
1970 if (device->discipline->term_IO(cqr) != 0) { in __dasd_device_check_expire()
1972 dev_err(&device->cdev->dev, in __dasd_device_check_expire()
1977 dasd_device_set_timer(device, 5*HZ); in __dasd_device_check_expire()
1979 dev_err(&device->cdev->dev, in __dasd_device_check_expire()
1990 static int __dasd_device_is_unusable(struct dasd_device *device, in __dasd_device_is_unusable() argument
1995 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && in __dasd_device_is_unusable()
1996 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in __dasd_device_is_unusable()
2003 if (device->stopped) { in __dasd_device_is_unusable()
2004 if (device->stopped & mask) { in __dasd_device_is_unusable()
2022 static void __dasd_device_start_head(struct dasd_device *device) in __dasd_device_start_head() argument
2027 if (list_empty(&device->ccw_queue)) in __dasd_device_start_head()
2029 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_start_head()
2033 if (__dasd_device_is_unusable(device, cqr)) { in __dasd_device_start_head()
2036 dasd_schedule_device_bh(device); in __dasd_device_start_head()
2040 rc = device->discipline->start_IO(cqr); in __dasd_device_start_head()
2042 dasd_device_set_timer(device, cqr->expires); in __dasd_device_start_head()
2044 dasd_schedule_device_bh(device); in __dasd_device_start_head()
2047 dasd_device_set_timer(device, 50); in __dasd_device_start_head()
2050 static void __dasd_device_check_path_events(struct dasd_device *device) in __dasd_device_check_path_events() argument
2054 if (!dasd_path_get_tbvpm(device)) in __dasd_device_check_path_events()
2057 if (device->stopped & in __dasd_device_check_path_events()
2060 rc = device->discipline->verify_path(device, in __dasd_device_check_path_events()
2061 dasd_path_get_tbvpm(device)); in __dasd_device_check_path_events()
2063 dasd_device_set_timer(device, 50); in __dasd_device_check_path_events()
2065 dasd_path_clear_all_verify(device); in __dasd_device_check_path_events()
2078 int dasd_flush_device_queue(struct dasd_device *device) in dasd_flush_device_queue() argument
2085 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_flush_device_queue()
2087 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { in dasd_flush_device_queue()
2091 rc = device->discipline->term_IO(cqr); in dasd_flush_device_queue()
2094 dev_err(&device->cdev->dev, in dasd_flush_device_queue()
2111 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_flush_device_queue()
2124 __dasd_device_process_final_queue(device, &flush_queue); in dasd_flush_device_queue()
2132 static void dasd_device_tasklet(struct dasd_device *device) in dasd_device_tasklet() argument
2136 atomic_set (&device->tasklet_scheduled, 0); in dasd_device_tasklet()
2138 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2140 __dasd_device_check_expire(device); in dasd_device_tasklet()
2142 __dasd_device_process_ccw_queue(device, &final_queue); in dasd_device_tasklet()
2143 __dasd_device_check_path_events(device); in dasd_device_tasklet()
2144 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2146 __dasd_device_process_final_queue(device, &final_queue); in dasd_device_tasklet()
2147 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2149 __dasd_device_start_head(device); in dasd_device_tasklet()
2150 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_device_tasklet()
2153 dasd_put_device(device); in dasd_device_tasklet()
2159 void dasd_schedule_device_bh(struct dasd_device *device) in dasd_schedule_device_bh() argument
2162 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) in dasd_schedule_device_bh()
2164 dasd_get_device(device); in dasd_schedule_device_bh()
2165 tasklet_hi_schedule(&device->tasklet); in dasd_schedule_device_bh()
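dasd_schedule_device_bh() guards against double scheduling by flipping tasklet_scheduled from 0 to 1 with atomic_cmpxchg(); only the winner takes a device reference and schedules the tasklet, while dasd_device_tasklet() clears the flag when it starts running and drops the reference when it is done. Sketch of the scheduling side:

void dasd_schedule_device_bh_sketch(struct dasd_device *device)
{
        /* already scheduled: the pending run will pick up the new work */
        if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}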
2169 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) in dasd_device_set_stop_bits() argument
2171 device->stopped |= bits; in dasd_device_set_stop_bits()
2175 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) in dasd_device_remove_stop_bits() argument
2177 device->stopped &= ~bits; in dasd_device_remove_stop_bits()
2178 if (!device->stopped) in dasd_device_remove_stop_bits()
2189 struct dasd_device *device; in dasd_add_request_head() local
2192 device = cqr->startdev; in dasd_add_request_head()
2193 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_head()
2195 list_add(&cqr->devlist, &device->ccw_queue); in dasd_add_request_head()
2197 dasd_schedule_device_bh(device); in dasd_add_request_head()
2198 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_head()
2208 struct dasd_device *device; in dasd_add_request_tail() local
2211 device = cqr->startdev; in dasd_add_request_tail()
2212 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_tail()
2214 list_add_tail(&cqr->devlist, &device->ccw_queue); in dasd_add_request_tail()
2216 dasd_schedule_device_bh(device); in dasd_add_request_tail()
2217 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_add_request_tail()
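dasd_add_request_head() and dasd_add_request_tail() share one pattern: splice the request into device->ccw_queue under the ccw device lock and let the device tasklet start the head of the queue. Sketch of the tail variant; the status assignment before the list add is not visible in the hits and is an assumption:

void dasd_add_request_tail_sketch(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;

        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;  /* assumption, see lead-in */
        list_add_tail(&cqr->devlist, &device->ccw_queue);
        /* let the bottom half pick it up */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}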
2235 struct dasd_device *device; in _wait_for_wakeup() local
2238 device = cqr->startdev; in _wait_for_wakeup()
2239 spin_lock_irq(get_ccwdev_lock(device->cdev)); in _wait_for_wakeup()
2241 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in _wait_for_wakeup()
2250 struct dasd_device *device; in __dasd_sleep_on_erp() local
2255 device = cqr->startdev; in __dasd_sleep_on_erp()
2258 device->discipline->handle_terminated_request(cqr); in __dasd_sleep_on_erp()
2262 erp_fn = device->discipline->erp_action(cqr); in __dasd_sleep_on_erp()
2269 __dasd_process_erp(device, cqr); in __dasd_sleep_on_erp()
2289 struct dasd_device *device; in _dasd_sleep_on() local
2296 device = maincqr->startdev; in _dasd_sleep_on()
2306 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in _dasd_sleep_on()
2313 if (device->stopped & ~DASD_STOPPED_PENDING && in _dasd_sleep_on()
2315 (!dasd_eer_enabled(device))) { in _dasd_sleep_on()
2324 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { in _dasd_sleep_on()
2336 generic_waitq, !(device->stopped)); in _dasd_sleep_on()
2343 wait_event(generic_waitq, !(device->stopped)); in _dasd_sleep_on()
2393 struct dasd_device *device; in _dasd_sleep_on_queue() local
2400 device = cqr->startdev; in _dasd_sleep_on_queue()
2404 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in _dasd_sleep_on_queue()
2411 if (device->stopped & ~DASD_STOPPED_PENDING && in _dasd_sleep_on_queue()
2413 !dasd_eer_enabled(device)) { in _dasd_sleep_on_queue()
2422 generic_waitq, !device->stopped); in _dasd_sleep_on_queue()
2429 wait_event(generic_waitq, !(device->stopped)); in _dasd_sleep_on_queue()
2509 static inline int _dasd_term_running_cqr(struct dasd_device *device) in _dasd_term_running_cqr() argument
2514 if (list_empty(&device->ccw_queue)) in _dasd_term_running_cqr()
2516 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in _dasd_term_running_cqr()
2517 rc = device->discipline->term_IO(cqr); in _dasd_term_running_cqr()
2530 struct dasd_device *device; in dasd_sleep_on_immediatly() local
2533 device = cqr->startdev; in dasd_sleep_on_immediatly()
2534 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && in dasd_sleep_on_immediatly()
2540 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2541 rc = _dasd_term_running_cqr(device); in dasd_sleep_on_immediatly()
2543 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2553 list_add(&cqr->devlist, device->ccw_queue.next); in dasd_sleep_on_immediatly()
2556 dasd_schedule_device_bh(device); in dasd_sleep_on_immediatly()
2558 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_sleep_on_immediatly()
2570 dasd_schedule_device_bh(device); in dasd_sleep_on_immediatly()
2571 if (device->block) in dasd_sleep_on_immediatly()
2572 dasd_schedule_block_bh(device->block); in dasd_sleep_on_immediatly()
2589 struct dasd_device *device = cqr->startdev; in dasd_cancel_req() local
2594 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); in dasd_cancel_req()
2602 rc = device->discipline->term_IO(cqr); in dasd_cancel_req()
2604 dev_err(&device->cdev->dev, in dasd_cancel_req()
2614 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); in dasd_cancel_req()
2615 dasd_schedule_device_bh(device); in dasd_cancel_req()
2667 static void __dasd_process_erp(struct dasd_device *device, in __dasd_process_erp() argument
2673 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); in __dasd_process_erp()
2675 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); in __dasd_process_erp()
2676 erp_fn = device->discipline->erp_postaction(cqr); in __dasd_process_erp()
3078 struct dasd_device *device; in dasd_times_out() local
3088 device = cqr->startdev ? cqr->startdev : block->base; in dasd_times_out()
3089 if (!device->blk_timeout) { in dasd_times_out()
3093 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_times_out()
3098 spin_lock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3102 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3107 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3122 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3124 spin_lock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3138 spin_unlock(get_ccwdev_lock(device->cdev)); in dasd_times_out()
3397 int dasd_device_is_ro(struct dasd_device *device) in dasd_device_is_ro() argument
3405 ccw_device_get_id(device->cdev, &dev_id); in dasd_device_is_ro()
3461 void dasd_generic_free_discipline(struct dasd_device *device) in dasd_generic_free_discipline() argument
3464 if (device->discipline) { in dasd_generic_free_discipline()
3465 if (device->discipline->uncheck_device) in dasd_generic_free_discipline()
3466 device->discipline->uncheck_device(device); in dasd_generic_free_discipline()
3467 module_put(device->discipline->owner); in dasd_generic_free_discipline()
3468 device->discipline = NULL; in dasd_generic_free_discipline()
3470 if (device->base_discipline) { in dasd_generic_free_discipline()
3471 module_put(device->base_discipline->owner); in dasd_generic_free_discipline()
3472 device->base_discipline = NULL; in dasd_generic_free_discipline()
3483 struct dasd_device *device; in dasd_generic_remove() local
3488 device = dasd_device_from_cdev(cdev); in dasd_generic_remove()
3489 if (IS_ERR(device)) { in dasd_generic_remove()
3493 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && in dasd_generic_remove()
3494 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_remove()
3496 dasd_put_device(device); in dasd_generic_remove()
3505 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_remove()
3507 block = device->block; in dasd_generic_remove()
3508 dasd_delete_device(device); in dasd_generic_remove()
3529 struct dasd_device *device; in dasd_generic_set_online() local
3534 device = dasd_create_device(cdev); in dasd_generic_set_online()
3535 if (IS_ERR(device)) in dasd_generic_set_online()
3536 return PTR_ERR(device); in dasd_generic_set_online()
3539 if (device->features & DASD_FEATURE_USEDIAG) { in dasd_generic_set_online()
3549 dasd_delete_device(device); in dasd_generic_set_online()
3558 dasd_delete_device(device); in dasd_generic_set_online()
3564 dasd_delete_device(device); in dasd_generic_set_online()
3569 dasd_delete_device(device); in dasd_generic_set_online()
3572 device->base_discipline = base_discipline; in dasd_generic_set_online()
3573 device->discipline = discipline; in dasd_generic_set_online()
3576 rc = discipline->check_device(device); in dasd_generic_set_online()
3582 dasd_delete_device(device); in dasd_generic_set_online()
3586 dasd_set_target_state(device, DASD_STATE_ONLINE); in dasd_generic_set_online()
3587 if (device->state <= DASD_STATE_KNOWN) { in dasd_generic_set_online()
3591 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_set_online()
3592 if (device->block) in dasd_generic_set_online()
3593 dasd_free_block(device->block); in dasd_generic_set_online()
3594 dasd_delete_device(device); in dasd_generic_set_online()
3599 wait_event(dasd_init_waitq, _wait_for_device(device)); in dasd_generic_set_online()
3601 dasd_put_device(device); in dasd_generic_set_online()
3608 struct dasd_device *device; in dasd_generic_set_offline() local
3615 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_set_offline()
3616 if (IS_ERR(device)) { in dasd_generic_set_offline()
3618 return PTR_ERR(device); in dasd_generic_set_offline()
3627 if (device->block) { in dasd_generic_set_offline()
3628 max_count = device->block->bdev ? 0 : -1; in dasd_generic_set_offline()
3629 open_count = atomic_read(&device->block->open_count); in dasd_generic_set_offline()
3648 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { in dasd_generic_set_offline()
3649 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3651 &device->flags); in dasd_generic_set_offline()
3657 set_bit(DASD_FLAG_OFFLINE, &device->flags); in dasd_generic_set_offline()
3664 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && in dasd_generic_set_offline()
3665 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3674 if (device->block) { in dasd_generic_set_offline()
3675 rc = fsync_bdev(device->block->bdev); in dasd_generic_set_offline()
3679 dasd_schedule_device_bh(device); in dasd_generic_set_offline()
3681 _wait_for_empty_queues(device)); in dasd_generic_set_offline()
3693 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { in dasd_generic_set_offline()
3697 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); in dasd_generic_set_offline()
3701 dasd_set_target_state(device, DASD_STATE_NEW); in dasd_generic_set_offline()
3703 block = device->block; in dasd_generic_set_offline()
3704 dasd_delete_device(device); in dasd_generic_set_offline()
3717 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); in dasd_generic_set_offline()
3718 clear_bit(DASD_FLAG_OFFLINE, &device->flags); in dasd_generic_set_offline()
3720 dasd_put_device(device); in dasd_generic_set_offline()
3726 int dasd_generic_last_path_gone(struct dasd_device *device) in dasd_generic_last_path_gone() argument
3730 dev_warn(&device->cdev->dev, "No operational channel path is left " in dasd_generic_last_path_gone()
3732 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); in dasd_generic_last_path_gone()
3734 dasd_eer_write(device, NULL, DASD_EER_NOPATH); in dasd_generic_last_path_gone()
3736 if (device->state < DASD_STATE_BASIC) in dasd_generic_last_path_gone()
3739 list_for_each_entry(cqr, &device->ccw_queue, devlist) in dasd_generic_last_path_gone()
3745 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); in dasd_generic_last_path_gone()
3746 dasd_device_clear_timer(device); in dasd_generic_last_path_gone()
3747 dasd_schedule_device_bh(device); in dasd_generic_last_path_gone()
3752 int dasd_generic_path_operational(struct dasd_device *device) in dasd_generic_path_operational() argument
3754 dev_info(&device->cdev->dev, "A channel path to the device has become " in dasd_generic_path_operational()
3756 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); in dasd_generic_path_operational()
3757 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); in dasd_generic_path_operational()
3758 if (device->stopped & DASD_UNRESUMED_PM) { in dasd_generic_path_operational()
3759 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); in dasd_generic_path_operational()
3760 dasd_restore_device(device); in dasd_generic_path_operational()
3763 dasd_schedule_device_bh(device); in dasd_generic_path_operational()
3764 if (device->block) { in dasd_generic_path_operational()
3765 dasd_schedule_block_bh(device->block); in dasd_generic_path_operational()
3766 if (device->block->request_queue) in dasd_generic_path_operational()
3767 blk_mq_run_hw_queues(device->block->request_queue, in dasd_generic_path_operational()
3771 if (!device->stopped) in dasd_generic_path_operational()
3780 struct dasd_device *device; in dasd_generic_notify() local
3783 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_notify()
3784 if (IS_ERR(device)) in dasd_generic_notify()
3791 dasd_path_no_path(device); in dasd_generic_notify()
3792 ret = dasd_generic_last_path_gone(device); in dasd_generic_notify()
3796 if (dasd_path_get_opm(device)) in dasd_generic_notify()
3797 ret = dasd_generic_path_operational(device); in dasd_generic_notify()
3800 dasd_put_device(device); in dasd_generic_notify()
3807 struct dasd_device *device; in dasd_generic_path_event() local
3810 device = dasd_device_from_cdev_locked(cdev); in dasd_generic_path_event()
3811 if (IS_ERR(device)) in dasd_generic_path_event()
3814 oldopm = dasd_path_get_opm(device); in dasd_generic_path_event()
3817 dasd_path_notoper(device, chp); in dasd_generic_path_event()
3820 dasd_path_available(device, chp); in dasd_generic_path_event()
3821 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3824 if (!dasd_path_is_operational(device, chp) && in dasd_generic_path_event()
3825 !dasd_path_need_verify(device, chp)) { in dasd_generic_path_event()
3831 dasd_path_available(device, chp); in dasd_generic_path_event()
3832 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3834 DBF_DEV_EVENT(DBF_WARNING, device, "%s", in dasd_generic_path_event()
3836 if (device->discipline->kick_validate) in dasd_generic_path_event()
3837 device->discipline->kick_validate(device); in dasd_generic_path_event()
3840 hpfpm = dasd_path_get_hpfpm(device); in dasd_generic_path_event()
3841 ifccpm = dasd_path_get_ifccpm(device); in dasd_generic_path_event()
3842 if (!dasd_path_get_opm(device) && hpfpm) { in dasd_generic_path_event()
3848 if (device->discipline->disable_hpf) in dasd_generic_path_event()
3849 device->discipline->disable_hpf(device); in dasd_generic_path_event()
3850 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); in dasd_generic_path_event()
3851 dasd_path_set_tbvpm(device, hpfpm); in dasd_generic_path_event()
3852 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3853 dasd_schedule_requeue(device); in dasd_generic_path_event()
3854 } else if (!dasd_path_get_opm(device) && ifccpm) { in dasd_generic_path_event()
3860 dasd_path_set_tbvpm(device, ifccpm); in dasd_generic_path_event()
3861 dasd_schedule_device_bh(device); in dasd_generic_path_event()
3863 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { in dasd_generic_path_event()
3864 dev_warn(&device->cdev->dev, in dasd_generic_path_event()
3866 DBF_DEV_EVENT(DBF_WARNING, device, in dasd_generic_path_event()
3868 dasd_eer_write(device, NULL, DASD_EER_NOPATH); in dasd_generic_path_event()
3869 dasd_device_set_stop_bits(device, in dasd_generic_path_event()
3872 dasd_put_device(device); in dasd_generic_path_event()
3876 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) in dasd_generic_verify_path() argument
3878 if (!dasd_path_get_opm(device) && lpm) { in dasd_generic_verify_path()
3879 dasd_path_set_opm(device, lpm); in dasd_generic_verify_path()
3880 dasd_generic_path_operational(device); in dasd_generic_verify_path()
3882 dasd_path_add_opm(device, lpm); in dasd_generic_verify_path()
3890 static int dasd_generic_requeue_all_requests(struct dasd_device *device) in dasd_generic_requeue_all_requests() argument
3898 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_generic_requeue_all_requests()
3900 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { in dasd_generic_requeue_all_requests()
3903 rc = device->discipline->term_IO(cqr); in dasd_generic_requeue_all_requests()
3906 dev_err(&device->cdev->dev, in dasd_generic_requeue_all_requests()
3909 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_generic_requeue_all_requests()
3910 dasd_put_device(device); in dasd_generic_requeue_all_requests()
3916 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_generic_requeue_all_requests()
3952 spin_lock_irq(get_ccwdev_lock(device->cdev)); in dasd_generic_requeue_all_requests()
3953 list_splice_tail(&requeue_queue, &device->ccw_queue); in dasd_generic_requeue_all_requests()
3954 spin_unlock_irq(get_ccwdev_lock(device->cdev)); in dasd_generic_requeue_all_requests()
3956 dasd_schedule_device_bh(device); in dasd_generic_requeue_all_requests()
3962 struct dasd_device *device = container_of(work, struct dasd_device, in do_requeue_requests() local
3964 dasd_generic_requeue_all_requests(device); in do_requeue_requests()
3965 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC); in do_requeue_requests()
3966 if (device->block) in do_requeue_requests()
3967 dasd_schedule_block_bh(device->block); in do_requeue_requests()
3968 dasd_put_device(device); in do_requeue_requests()
3971 void dasd_schedule_requeue(struct dasd_device *device) in dasd_schedule_requeue() argument
3973 dasd_get_device(device); in dasd_schedule_requeue()
3975 if (!schedule_work(&device->requeue_requests)) in dasd_schedule_requeue()
3976 dasd_put_device(device); in dasd_schedule_requeue()
3982 struct dasd_device *device = dasd_device_from_cdev(cdev); in dasd_generic_pm_freeze() local
3984 if (IS_ERR(device)) in dasd_generic_pm_freeze()
3985 return PTR_ERR(device); in dasd_generic_pm_freeze()
3988 set_bit(DASD_FLAG_SUSPENDED, &device->flags); in dasd_generic_pm_freeze()
3990 if (device->discipline->freeze) in dasd_generic_pm_freeze()
3991 device->discipline->freeze(device); in dasd_generic_pm_freeze()
3994 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); in dasd_generic_pm_freeze()
3996 return dasd_generic_requeue_all_requests(device); in dasd_generic_pm_freeze()
4002 struct dasd_device *device = dasd_device_from_cdev(cdev); in dasd_generic_restore_device() local
4005 if (IS_ERR(device)) in dasd_generic_restore_device()
4006 return PTR_ERR(device); in dasd_generic_restore_device()
4009 dasd_device_remove_stop_bits(device, in dasd_generic_restore_device()
4012 dasd_schedule_device_bh(device); in dasd_generic_restore_device()
4018 if (device->discipline->restore && !(device->stopped)) in dasd_generic_restore_device()
4019 rc = device->discipline->restore(device); in dasd_generic_restore_device()
4020 if (rc || device->stopped) in dasd_generic_restore_device()
4025 device->stopped |= DASD_UNRESUMED_PM; in dasd_generic_restore_device()
4027 if (device->block) { in dasd_generic_restore_device()
4028 dasd_schedule_block_bh(device->block); in dasd_generic_restore_device()
4029 if (device->block->request_queue) in dasd_generic_restore_device()
4030 blk_mq_run_hw_queues(device->block->request_queue, in dasd_generic_restore_device()
4034 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); in dasd_generic_restore_device()
4035 dasd_put_device(device); in dasd_generic_restore_device()
4040 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, in dasd_generic_build_rdc() argument
4049 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); in dasd_generic_build_rdc()
4053 dev_err(&device->cdev->dev, in dasd_generic_build_rdc()
4072 cqr->startdev = device; in dasd_generic_build_rdc()
4073 cqr->memdev = device; in dasd_generic_build_rdc()
4082 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, in dasd_generic_read_dev_chars() argument
4088 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, in dasd_generic_read_dev_chars()
4135 struct dasd_device *device; in dasd_generic_shutdown() local
4137 device = dasd_device_from_cdev(cdev); in dasd_generic_shutdown()
4138 if (IS_ERR(device)) in dasd_generic_shutdown()
4141 if (device->block) in dasd_generic_shutdown()
4142 dasd_schedule_block_bh(device->block); in dasd_generic_shutdown()
4144 dasd_schedule_device_bh(device); in dasd_generic_shutdown()
4146 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); in dasd_generic_shutdown()