1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  */
10 
11 #define KMSG_COMPONENT "dasd"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/hdreg.h>
21 #include <linux/async.h>
22 #include <linux/mutex.h>
23 #include <linux/debugfs.h>
24 #include <linux/seq_file.h>
25 #include <linux/vmalloc.h>
26 
27 #include <asm/ccwdev.h>
28 #include <asm/ebcdic.h>
29 #include <asm/idals.h>
30 #include <asm/itcw.h>
31 #include <asm/diag.h>
32 
33 /* This is ugly... */
34 #define PRINTK_HEADER "dasd:"
35 
36 #include "dasd_int.h"
37 /*
38  * SECTION: Constant definitions to be used within this file
39  */
40 #define DASD_CHANQ_MAX_SIZE 4
41 
42 #define DASD_DIAG_MOD		"dasd_diag_mod"
43 
44 static unsigned int queue_depth = 32;
45 static unsigned int nr_hw_queues = 4;
46 
47 module_param(queue_depth, uint, 0444);
48 MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
49 
50 module_param(nr_hw_queues, uint, 0444);
51 MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
52 
53 /*
54  * SECTION: exported variables of dasd.c
55  */
56 debug_info_t *dasd_debug_area;
57 EXPORT_SYMBOL(dasd_debug_area);
58 static struct dentry *dasd_debugfs_root_entry;
59 struct dasd_discipline *dasd_diag_discipline_pointer;
60 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
61 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
62 
63 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
64 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
65 		   " Copyright IBM Corp. 2000");
66 MODULE_LICENSE("GPL");
67 
68 /*
69  * SECTION: prototypes for static functions of dasd.c
70  */
71 static int  dasd_alloc_queue(struct dasd_block *);
72 static void dasd_free_queue(struct dasd_block *);
73 static int dasd_flush_block_queue(struct dasd_block *);
74 static void dasd_device_tasklet(unsigned long);
75 static void dasd_block_tasklet(unsigned long);
76 static void do_kick_device(struct work_struct *);
77 static void do_reload_device(struct work_struct *);
78 static void do_requeue_requests(struct work_struct *);
79 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
80 static void dasd_device_timeout(struct timer_list *);
81 static void dasd_block_timeout(struct timer_list *);
82 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
83 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
84 static void dasd_profile_exit(struct dasd_profile *);
85 static void dasd_hosts_init(struct dentry *, struct dasd_device *);
86 static void dasd_hosts_exit(struct dasd_device *);
87 
88 /*
89  * SECTION: Operations on the device structure.
90  */
91 static wait_queue_head_t dasd_init_waitq;
92 static wait_queue_head_t dasd_flush_wq;
93 static wait_queue_head_t generic_waitq;
94 static wait_queue_head_t shutdown_waitq;
95 
96 /*
97  * Allocate memory for a new device structure.
98  */
99 struct dasd_device *dasd_alloc_device(void)
100 {
101 	struct dasd_device *device;
102 
103 	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
104 	if (!device)
105 		return ERR_PTR(-ENOMEM);
106 
107 	/* Get two pages for normal block device operations. */
108 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
109 	if (!device->ccw_mem) {
110 		kfree(device);
111 		return ERR_PTR(-ENOMEM);
112 	}
113 	/* Get one page for error recovery. */
114 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
115 	if (!device->erp_mem) {
116 		free_pages((unsigned long) device->ccw_mem, 1);
117 		kfree(device);
118 		return ERR_PTR(-ENOMEM);
119 	}
120 	/* Get two pages for ese format. */
121 	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
122 	if (!device->ese_mem) {
123 		free_page((unsigned long) device->erp_mem);
124 		free_pages((unsigned long) device->ccw_mem, 1);
125 		kfree(device);
126 		return ERR_PTR(-ENOMEM);
127 	}
128 
129 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
130 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
131 	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
132 	spin_lock_init(&device->mem_lock);
133 	atomic_set(&device->tasklet_scheduled, 0);
134 	tasklet_init(&device->tasklet, dasd_device_tasklet,
135 		     (unsigned long) device);
136 	INIT_LIST_HEAD(&device->ccw_queue);
137 	timer_setup(&device->timer, dasd_device_timeout, 0);
138 	INIT_WORK(&device->kick_work, do_kick_device);
139 	INIT_WORK(&device->reload_device, do_reload_device);
140 	INIT_WORK(&device->requeue_requests, do_requeue_requests);
141 	device->state = DASD_STATE_NEW;
142 	device->target = DASD_STATE_NEW;
143 	mutex_init(&device->state_mutex);
144 	spin_lock_init(&device->profile.lock);
145 	return device;
146 }
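/*
 * Illustrative sketch, not part of the driver: since dasd_alloc_device()
 * returns an ERR_PTR-encoded pointer rather than NULL on failure, a
 * hypothetical caller would check the result like this:
 *
 *	struct dasd_device *device;
 *
 *	device = dasd_alloc_device();
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	...
 *	dasd_free_device(device);
 */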
147 
148 /*
149  * Free memory of a device structure.
150  */
151 void dasd_free_device(struct dasd_device *device)
152 {
153 	kfree(device->private);
154 	free_pages((unsigned long) device->ese_mem, 1);
155 	free_page((unsigned long) device->erp_mem);
156 	free_pages((unsigned long) device->ccw_mem, 1);
157 	kfree(device);
158 }
159 
160 /*
161  * Allocate memory for a new block structure.
162  */
163 struct dasd_block *dasd_alloc_block(void)
164 {
165 	struct dasd_block *block;
166 
167 	block = kzalloc(sizeof(*block), GFP_ATOMIC);
168 	if (!block)
169 		return ERR_PTR(-ENOMEM);
170 	/* open_count = 0 means device online but not in use */
171 	atomic_set(&block->open_count, -1);
172 
173 	atomic_set(&block->tasklet_scheduled, 0);
174 	tasklet_init(&block->tasklet, dasd_block_tasklet,
175 		     (unsigned long) block);
176 	INIT_LIST_HEAD(&block->ccw_queue);
177 	spin_lock_init(&block->queue_lock);
178 	INIT_LIST_HEAD(&block->format_list);
179 	spin_lock_init(&block->format_lock);
180 	timer_setup(&block->timer, dasd_block_timeout, 0);
181 	spin_lock_init(&block->profile.lock);
182 
183 	return block;
184 }
185 EXPORT_SYMBOL_GPL(dasd_alloc_block);
186 
187 /*
188  * Free memory of a block structure.
189  */
190 void dasd_free_block(struct dasd_block *block)
191 {
192 	kfree(block);
193 }
194 EXPORT_SYMBOL_GPL(dasd_free_block);
195 
196 /*
197  * Make a new device known to the system.
198  */
199 static int dasd_state_new_to_known(struct dasd_device *device)
200 {
201 	int rc;
202 
203 	/*
204 	 * As long as the device is not in state DASD_STATE_NEW we want to
205 	 * keep the reference count > 0.
206 	 */
207 	dasd_get_device(device);
208 
209 	if (device->block) {
210 		rc = dasd_alloc_queue(device->block);
211 		if (rc) {
212 			dasd_put_device(device);
213 			return rc;
214 		}
215 	}
216 	device->state = DASD_STATE_KNOWN;
217 	return 0;
218 }
219 
220 /*
221  * Let the system forget about a device.
222  */
223 static int dasd_state_known_to_new(struct dasd_device *device)
224 {
225 	/* Disable extended error reporting for this device. */
226 	dasd_eer_disable(device);
227 	device->state = DASD_STATE_NEW;
228 
229 	if (device->block)
230 		dasd_free_queue(device->block);
231 
232 	/* Give up reference we took in dasd_state_new_to_known. */
233 	dasd_put_device(device);
234 	return 0;
235 }
236 
237 static struct dentry *dasd_debugfs_setup(const char *name,
238 					 struct dentry *base_dentry)
239 {
240 	struct dentry *pde;
241 
242 	if (!base_dentry)
243 		return NULL;
244 	pde = debugfs_create_dir(name, base_dentry);
245 	if (!pde || IS_ERR(pde))
246 		return NULL;
247 	return pde;
248 }
249 
250 /*
251  * Request the irq line for the device.
252  */
253 static int dasd_state_known_to_basic(struct dasd_device *device)
254 {
255 	struct dasd_block *block = device->block;
256 	int rc = 0;
257 
258 	/* Allocate and register gendisk structure. */
259 	if (block) {
260 		rc = dasd_gendisk_alloc(block);
261 		if (rc)
262 			return rc;
263 		block->debugfs_dentry =
264 			dasd_debugfs_setup(block->gdp->disk_name,
265 					   dasd_debugfs_root_entry);
266 		dasd_profile_init(&block->profile, block->debugfs_dentry);
267 		if (dasd_global_profile_level == DASD_PROFILE_ON)
268 			dasd_profile_on(&device->block->profile);
269 	}
270 	device->debugfs_dentry =
271 		dasd_debugfs_setup(dev_name(&device->cdev->dev),
272 				   dasd_debugfs_root_entry);
273 	dasd_profile_init(&device->profile, device->debugfs_dentry);
274 	dasd_hosts_init(device->debugfs_dentry, device);
275 
276 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
277 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
278 					    8 * sizeof(long));
279 	debug_register_view(device->debug_area, &debug_sprintf_view);
280 	debug_set_level(device->debug_area, DBF_WARNING);
281 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
282 
283 	device->state = DASD_STATE_BASIC;
284 
285 	return rc;
286 }
287 
288 /*
289  * Release the irq line for the device. Terminate any running i/o.
290  */
291 static int dasd_state_basic_to_known(struct dasd_device *device)
292 {
293 	int rc;
294 
295 	if (device->discipline->basic_to_known) {
296 		rc = device->discipline->basic_to_known(device);
297 		if (rc)
298 			return rc;
299 	}
300 
301 	if (device->block) {
302 		dasd_profile_exit(&device->block->profile);
303 		debugfs_remove(device->block->debugfs_dentry);
304 		dasd_gendisk_free(device->block);
305 		dasd_block_clear_timer(device->block);
306 	}
307 	rc = dasd_flush_device_queue(device);
308 	if (rc)
309 		return rc;
310 	dasd_device_clear_timer(device);
311 	dasd_profile_exit(&device->profile);
312 	dasd_hosts_exit(device);
313 	debugfs_remove(device->debugfs_dentry);
314 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
315 	if (device->debug_area != NULL) {
316 		debug_unregister(device->debug_area);
317 		device->debug_area = NULL;
318 	}
319 	device->state = DASD_STATE_KNOWN;
320 	return 0;
321 }
322 
323 /*
324  * Do the initial analysis. The do_analysis function may return
325  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
326  * until the discipline decides to continue the startup sequence
327  * by calling the function dasd_change_state. The eckd discipline
328  * uses this to start a ccw that detects the format. The completion
329  * interrupt for this detection ccw uses the kernel event daemon to
330  * trigger the call to dasd_change_state. All this is done in the
331  * discipline code, see dasd_eckd.c.
332  * After the analysis ccw is done (do_analysis returned 0) the block
333  * device is setup.
334  * In case the analysis returns an error, the device setup is stopped
335  * (a fake disk was already added to allow formatting).
336  */
337 static int dasd_state_basic_to_ready(struct dasd_device *device)
338 {
339 	int rc;
340 	struct dasd_block *block;
341 	struct gendisk *disk;
342 
343 	rc = 0;
344 	block = device->block;
345 	/* make disk known with correct capacity */
346 	if (block) {
347 		if (block->base->discipline->do_analysis != NULL)
348 			rc = block->base->discipline->do_analysis(block);
349 		if (rc) {
350 			if (rc != -EAGAIN) {
351 				device->state = DASD_STATE_UNFMT;
352 				disk = device->block->gdp;
353 				kobject_uevent(&disk_to_dev(disk)->kobj,
354 					       KOBJ_CHANGE);
355 				goto out;
356 			}
357 			return rc;
358 		}
359 		if (device->discipline->setup_blk_queue)
360 			device->discipline->setup_blk_queue(block);
361 		set_capacity(block->gdp,
362 			     block->blocks << block->s2b_shift);
363 		device->state = DASD_STATE_READY;
364 		rc = dasd_scan_partitions(block);
365 		if (rc) {
366 			device->state = DASD_STATE_BASIC;
367 			return rc;
368 		}
369 	} else {
370 		device->state = DASD_STATE_READY;
371 	}
372 out:
373 	if (device->discipline->basic_to_ready)
374 		rc = device->discipline->basic_to_ready(device);
375 	return rc;
376 }
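/*
 * Illustrative sketch of the -EAGAIN protocol described above (the helper
 * name is hypothetical, not from this driver): a discipline's do_analysis
 * callback may defer completion and re-trigger the state machine later.
 *
 *	static int example_do_analysis(struct dasd_block *block)
 *	{
 *		if (!format_detection_done(block))	// hypothetical helper
 *			return -EAGAIN;	// stay in DASD_STATE_BASIC for now
 *		return 0;		// capacity known, setup continues
 *	}
 *
 * When the detection ccw completes, the discipline kicks the state
 * machine (e.g. via dasd_kick_device()), which re-runs dasd_change_state().
 */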
377 
378 static inline
379 int _wait_for_empty_queues(struct dasd_device *device)
380 {
381 	if (device->block)
382 		return list_empty(&device->ccw_queue) &&
383 			list_empty(&device->block->ccw_queue);
384 	else
385 		return list_empty(&device->ccw_queue);
386 }
387 
388 /*
389  * Remove device from block device layer. Destroy dirty buffers.
390  * Forget format information. Check if the target level is basic
391  * and if it is create fake disk for formatting.
392  */
393 static int dasd_state_ready_to_basic(struct dasd_device *device)
394 {
395 	int rc;
396 
397 	device->state = DASD_STATE_BASIC;
398 	if (device->block) {
399 		struct dasd_block *block = device->block;
400 		rc = dasd_flush_block_queue(block);
401 		if (rc) {
402 			device->state = DASD_STATE_READY;
403 			return rc;
404 		}
405 		dasd_destroy_partitions(block);
406 		block->blocks = 0;
407 		block->bp_block = 0;
408 		block->s2b_shift = 0;
409 	}
410 	return 0;
411 }
412 
413 /*
414  * Back to basic.
415  */
416 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
417 {
418 	device->state = DASD_STATE_BASIC;
419 	return 0;
420 }
421 
422 /*
423  * Make the device online and schedule the bottom half to start
424  * the requeueing of requests from the linux request queue to the
425  * ccw queue.
426  */
427 static int
428 dasd_state_ready_to_online(struct dasd_device * device)
429 {
430 	device->state = DASD_STATE_ONLINE;
431 	if (device->block) {
432 		dasd_schedule_block_bh(device->block);
433 		if ((device->features & DASD_FEATURE_USERAW)) {
434 			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
435 					KOBJ_CHANGE);
436 			return 0;
437 		}
438 		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
439 	}
440 	return 0;
441 }
442 
443 /*
444  * Stop the requeueing of requests again.
445  */
446 static int dasd_state_online_to_ready(struct dasd_device *device)
447 {
448 	int rc;
449 
450 	if (device->discipline->online_to_ready) {
451 		rc = device->discipline->online_to_ready(device);
452 		if (rc)
453 			return rc;
454 	}
455 
456 	device->state = DASD_STATE_READY;
457 	if (device->block && !(device->features & DASD_FEATURE_USERAW))
458 		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
459 	return 0;
460 }
461 
462 /*
463  * Device startup state changes.
464  */
465 static int dasd_increase_state(struct dasd_device *device)
466 {
467 	int rc;
468 
469 	rc = 0;
470 	if (device->state == DASD_STATE_NEW &&
471 	    device->target >= DASD_STATE_KNOWN)
472 		rc = dasd_state_new_to_known(device);
473 
474 	if (!rc &&
475 	    device->state == DASD_STATE_KNOWN &&
476 	    device->target >= DASD_STATE_BASIC)
477 		rc = dasd_state_known_to_basic(device);
478 
479 	if (!rc &&
480 	    device->state == DASD_STATE_BASIC &&
481 	    device->target >= DASD_STATE_READY)
482 		rc = dasd_state_basic_to_ready(device);
483 
484 	if (!rc &&
485 	    device->state == DASD_STATE_UNFMT &&
486 	    device->target > DASD_STATE_UNFMT)
487 		rc = -EPERM;
488 
489 	if (!rc &&
490 	    device->state == DASD_STATE_READY &&
491 	    device->target >= DASD_STATE_ONLINE)
492 		rc = dasd_state_ready_to_online(device);
493 
494 	return rc;
495 }
496 
497 /*
498  * Device shutdown state changes.
499  */
500 static int dasd_decrease_state(struct dasd_device *device)
501 {
502 	int rc;
503 
504 	rc = 0;
505 	if (device->state == DASD_STATE_ONLINE &&
506 	    device->target <= DASD_STATE_READY)
507 		rc = dasd_state_online_to_ready(device);
508 
509 	if (!rc &&
510 	    device->state == DASD_STATE_READY &&
511 	    device->target <= DASD_STATE_BASIC)
512 		rc = dasd_state_ready_to_basic(device);
513 
514 	if (!rc &&
515 	    device->state == DASD_STATE_UNFMT &&
516 	    device->target <= DASD_STATE_BASIC)
517 		rc = dasd_state_unfmt_to_basic(device);
518 
519 	if (!rc &&
520 	    device->state == DASD_STATE_BASIC &&
521 	    device->target <= DASD_STATE_KNOWN)
522 		rc = dasd_state_basic_to_known(device);
523 
524 	if (!rc &&
525 	    device->state == DASD_STATE_KNOWN &&
526 	    device->target <= DASD_STATE_NEW)
527 		rc = dasd_state_known_to_new(device);
528 
529 	return rc;
530 }
531 
532 /*
533  * This is the main startup/shutdown routine.
534  */
535 static void dasd_change_state(struct dasd_device *device)
536 {
537 	int rc;
538 
539 	if (device->state == device->target)
540 		/* Already where we want to go today... */
541 		return;
542 	if (device->state < device->target)
543 		rc = dasd_increase_state(device);
544 	else
545 		rc = dasd_decrease_state(device);
546 	if (rc == -EAGAIN)
547 		return;
548 	if (rc)
549 		device->target = device->state;
550 
551 	/* let user-space know that the device status changed */
552 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
553 
554 	if (device->state == device->target)
555 		wake_up(&dasd_init_waitq);
556 }
557 
558 /*
559  * Kick starter for devices that did not complete the startup/shutdown
560  * procedure or were sleeping because of a pending state.
561  * dasd_kick_device will schedule a call to do_kick_device to the kernel
562  * event daemon.
563  */
564 static void do_kick_device(struct work_struct *work)
565 {
566 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
567 	mutex_lock(&device->state_mutex);
568 	dasd_change_state(device);
569 	mutex_unlock(&device->state_mutex);
570 	dasd_schedule_device_bh(device);
571 	dasd_put_device(device);
572 }
573 
574 void dasd_kick_device(struct dasd_device *device)
575 {
576 	dasd_get_device(device);
577 	/* queue call to do_kick_device to the kernel event daemon. */
578 	if (!schedule_work(&device->kick_work))
579 		dasd_put_device(device);
580 }
581 EXPORT_SYMBOL(dasd_kick_device);
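/*
 * Note on the reference counting idiom above: the reference taken by
 * dasd_get_device() is normally dropped by dasd_put_device() at the end
 * of do_kick_device(). schedule_work() returns false when kick_work is
 * already pending; no additional worker run will happen in that case, so
 * the extra reference is dropped again immediately.
 */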
582 
583 /*
584  * dasd_reload_device will schedule a call to do_reload_device to the kernel
585  * event daemon.
586  */
587 static void do_reload_device(struct work_struct *work)
588 {
589 	struct dasd_device *device = container_of(work, struct dasd_device,
590 						  reload_device);
591 	device->discipline->reload(device);
592 	dasd_put_device(device);
593 }
594 
595 void dasd_reload_device(struct dasd_device *device)
596 {
597 	dasd_get_device(device);
598 	/* queue call to do_reload_device to the kernel event daemon. */
599 	if (!schedule_work(&device->reload_device))
600 		dasd_put_device(device);
601 }
602 EXPORT_SYMBOL(dasd_reload_device);
603 
604 /*
605  * Set the target state for a device and start the state change.
606  */
607 void dasd_set_target_state(struct dasd_device *device, int target)
608 {
609 	dasd_get_device(device);
610 	mutex_lock(&device->state_mutex);
611 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
612 	if (dasd_probeonly && target > DASD_STATE_READY)
613 		target = DASD_STATE_READY;
614 	if (device->target != target) {
615 		if (device->state == target)
616 			wake_up(&dasd_init_waitq);
617 		device->target = target;
618 	}
619 	if (device->state != device->target)
620 		dasd_change_state(device);
621 	mutex_unlock(&device->state_mutex);
622 	dasd_put_device(device);
623 }
624 
625 /*
626  * Enable a device and wait until it has reached its target state.
627  */
628 static inline int _wait_for_device(struct dasd_device *device)
629 {
630 	return (device->state == device->target);
631 }
632 
633 void dasd_enable_device(struct dasd_device *device)
634 {
635 	dasd_set_target_state(device, DASD_STATE_ONLINE);
636 	if (device->state <= DASD_STATE_KNOWN)
637 		/* No discipline for device found. */
638 		dasd_set_target_state(device, DASD_STATE_NEW);
639 	/* Now wait for the devices to come up. */
640 	wait_event(dasd_init_waitq, _wait_for_device(device));
641 
642 	dasd_reload_device(device);
643 	if (device->discipline->kick_validate)
644 		device->discipline->kick_validate(device);
645 }
646 EXPORT_SYMBOL(dasd_enable_device);
647 
648 /*
649  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
650  */
651 
652 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
653 
654 #ifdef CONFIG_DASD_PROFILE
655 struct dasd_profile dasd_global_profile = {
656 	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
657 };
658 static struct dentry *dasd_debugfs_global_entry;
659 
660 /*
661  * Add profiling information for cqr before execution.
662  */
663 static void dasd_profile_start(struct dasd_block *block,
664 			       struct dasd_ccw_req *cqr,
665 			       struct request *req)
666 {
667 	struct list_head *l;
668 	unsigned int counter;
669 	struct dasd_device *device;
670 
671 	/* count the length of the chanq for statistics */
672 	counter = 0;
673 	if (dasd_global_profile_level || block->profile.data)
674 		list_for_each(l, &block->ccw_queue)
675 			if (++counter >= 31)
676 				break;
677 
678 	spin_lock(&dasd_global_profile.lock);
679 	if (dasd_global_profile.data) {
680 		dasd_global_profile.data->dasd_io_nr_req[counter]++;
681 		if (rq_data_dir(req) == READ)
682 			dasd_global_profile.data->dasd_read_nr_req[counter]++;
683 	}
684 	spin_unlock(&dasd_global_profile.lock);
685 
686 	spin_lock(&block->profile.lock);
687 	if (block->profile.data) {
688 		block->profile.data->dasd_io_nr_req[counter]++;
689 		if (rq_data_dir(req) == READ)
690 			block->profile.data->dasd_read_nr_req[counter]++;
691 	}
692 	spin_unlock(&block->profile.lock);
693 
694 	/*
695 	 * We count the request for the start device, even though it may run on
696 	 * some other device due to error recovery. This way we make sure that
697 	 * we count each request only once.
698 	 */
699 	device = cqr->startdev;
700 	if (!device->profile.data)
701 		return;
702 
703 	spin_lock(get_ccwdev_lock(device->cdev));
704 	counter = 1; /* request is not yet queued on the start device */
705 	list_for_each(l, &device->ccw_queue)
706 		if (++counter >= 31)
707 			break;
708 	spin_unlock(get_ccwdev_lock(device->cdev));
709 
710 	spin_lock(&device->profile.lock);
711 	device->profile.data->dasd_io_nr_req[counter]++;
712 	if (rq_data_dir(req) == READ)
713 		device->profile.data->dasd_read_nr_req[counter]++;
714 	spin_unlock(&device->profile.lock);
715 }
716 
717 /*
718  * Add profiling information for cqr after execution.
719  */
720 
721 #define dasd_profile_counter(value, index)			   \
722 {								   \
723 	for (index = 0; index < 31 && value >> (2+index); index++) \
724 		;						   \
725 }
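/*
 * dasd_profile_counter() maps a value to a logarithmic histogram bucket:
 * the loop stops at the first index for which (value >> (2 + index)) is
 * zero, i.e. roughly index = max(0, ilog2(value) - 1), capped at 31.
 * Worked example (illustrative): values 1-3 fall into bucket 0, 4-7 into
 * bucket 1, 8-15 into bucket 2, and so on up to bucket 31.
 */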
726 
727 static void dasd_profile_end_add_data(struct dasd_profile_info *data,
728 				      int is_alias,
729 				      int is_tpm,
730 				      int is_read,
731 				      long sectors,
732 				      int sectors_ind,
733 				      int tottime_ind,
734 				      int tottimeps_ind,
735 				      int strtime_ind,
736 				      int irqtime_ind,
737 				      int irqtimeps_ind,
738 				      int endtime_ind)
739 {
740 	/* in case of an overflow, reset the whole profile */
741 	if (data->dasd_io_reqs == UINT_MAX) {
742 		memset(data, 0, sizeof(*data));
743 		ktime_get_real_ts64(&data->starttod);
744 	}
745 	data->dasd_io_reqs++;
746 	data->dasd_io_sects += sectors;
747 	if (is_alias)
748 		data->dasd_io_alias++;
749 	if (is_tpm)
750 		data->dasd_io_tpm++;
751 
752 	data->dasd_io_secs[sectors_ind]++;
753 	data->dasd_io_times[tottime_ind]++;
754 	data->dasd_io_timps[tottimeps_ind]++;
755 	data->dasd_io_time1[strtime_ind]++;
756 	data->dasd_io_time2[irqtime_ind]++;
757 	data->dasd_io_time2ps[irqtimeps_ind]++;
758 	data->dasd_io_time3[endtime_ind]++;
759 
760 	if (is_read) {
761 		data->dasd_read_reqs++;
762 		data->dasd_read_sects += sectors;
763 		if (is_alias)
764 			data->dasd_read_alias++;
765 		if (is_tpm)
766 			data->dasd_read_tpm++;
767 		data->dasd_read_secs[sectors_ind]++;
768 		data->dasd_read_times[tottime_ind]++;
769 		data->dasd_read_time1[strtime_ind]++;
770 		data->dasd_read_time2[irqtime_ind]++;
771 		data->dasd_read_time3[endtime_ind]++;
772 	}
773 }
774 
775 static void dasd_profile_end(struct dasd_block *block,
776 			     struct dasd_ccw_req *cqr,
777 			     struct request *req)
778 {
779 	unsigned long strtime, irqtime, endtime, tottime;
780 	unsigned long tottimeps, sectors;
781 	struct dasd_device *device;
782 	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
783 	int irqtime_ind, irqtimeps_ind, endtime_ind;
784 	struct dasd_profile_info *data;
785 
786 	device = cqr->startdev;
787 	if (!(dasd_global_profile_level ||
788 	      block->profile.data ||
789 	      device->profile.data))
790 		return;
791 
792 	sectors = blk_rq_sectors(req);
793 	if (!cqr->buildclk || !cqr->startclk ||
794 	    !cqr->stopclk || !cqr->endclk ||
795 	    !sectors)
796 		return;
797 
798 	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
799 	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
800 	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
801 	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
802 	tottimeps = tottime / sectors;
803 
804 	dasd_profile_counter(sectors, sectors_ind);
805 	dasd_profile_counter(tottime, tottime_ind);
806 	dasd_profile_counter(tottimeps, tottimeps_ind);
807 	dasd_profile_counter(strtime, strtime_ind);
808 	dasd_profile_counter(irqtime, irqtime_ind);
809 	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
810 	dasd_profile_counter(endtime, endtime_ind);
811 
812 	spin_lock(&dasd_global_profile.lock);
813 	if (dasd_global_profile.data) {
814 		data = dasd_global_profile.data;
815 		data->dasd_sum_times += tottime;
816 		data->dasd_sum_time_str += strtime;
817 		data->dasd_sum_time_irq += irqtime;
818 		data->dasd_sum_time_end += endtime;
819 		dasd_profile_end_add_data(dasd_global_profile.data,
820 					  cqr->startdev != block->base,
821 					  cqr->cpmode == 1,
822 					  rq_data_dir(req) == READ,
823 					  sectors, sectors_ind, tottime_ind,
824 					  tottimeps_ind, strtime_ind,
825 					  irqtime_ind, irqtimeps_ind,
826 					  endtime_ind);
827 	}
828 	spin_unlock(&dasd_global_profile.lock);
829 
830 	spin_lock(&block->profile.lock);
831 	if (block->profile.data) {
832 		data = block->profile.data;
833 		data->dasd_sum_times += tottime;
834 		data->dasd_sum_time_str += strtime;
835 		data->dasd_sum_time_irq += irqtime;
836 		data->dasd_sum_time_end += endtime;
837 		dasd_profile_end_add_data(block->profile.data,
838 					  cqr->startdev != block->base,
839 					  cqr->cpmode == 1,
840 					  rq_data_dir(req) == READ,
841 					  sectors, sectors_ind, tottime_ind,
842 					  tottimeps_ind, strtime_ind,
843 					  irqtime_ind, irqtimeps_ind,
844 					  endtime_ind);
845 	}
846 	spin_unlock(&block->profile.lock);
847 
848 	spin_lock(&device->profile.lock);
849 	if (device->profile.data) {
850 		data = device->profile.data;
851 		data->dasd_sum_times += tottime;
852 		data->dasd_sum_time_str += strtime;
853 		data->dasd_sum_time_irq += irqtime;
854 		data->dasd_sum_time_end += endtime;
855 		dasd_profile_end_add_data(device->profile.data,
856 					  cqr->startdev != block->base,
857 					  cqr->cpmode == 1,
858 					  rq_data_dir(req) == READ,
859 					  sectors, sectors_ind, tottime_ind,
860 					  tottimeps_ind, strtime_ind,
861 					  irqtime_ind, irqtimeps_ind,
862 					  endtime_ind);
863 	}
864 	spin_unlock(&device->profile.lock);
865 }
866 
867 void dasd_profile_reset(struct dasd_profile *profile)
868 {
869 	struct dasd_profile_info *data;
870 
871 	spin_lock_bh(&profile->lock);
872 	data = profile->data;
873 	if (!data) {
874 		spin_unlock_bh(&profile->lock);
875 		return;
876 	}
877 	memset(data, 0, sizeof(*data));
878 	ktime_get_real_ts64(&data->starttod);
879 	spin_unlock_bh(&profile->lock);
880 }
881 
882 int dasd_profile_on(struct dasd_profile *profile)
883 {
884 	struct dasd_profile_info *data;
885 
886 	data = kzalloc(sizeof(*data), GFP_KERNEL);
887 	if (!data)
888 		return -ENOMEM;
889 	spin_lock_bh(&profile->lock);
890 	if (profile->data) {
891 		spin_unlock_bh(&profile->lock);
892 		kfree(data);
893 		return 0;
894 	}
895 	ktime_get_real_ts64(&data->starttod);
896 	profile->data = data;
897 	spin_unlock_bh(&profile->lock);
898 	return 0;
899 }
900 
901 void dasd_profile_off(struct dasd_profile *profile)
902 {
903 	spin_lock_bh(&profile->lock);
904 	kfree(profile->data);
905 	profile->data = NULL;
906 	spin_unlock_bh(&profile->lock);
907 }
908 
909 char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
910 {
911 	char *buffer;
912 
913 	buffer = vmalloc(user_len + 1);
914 	if (buffer == NULL)
915 		return ERR_PTR(-ENOMEM);
916 	if (copy_from_user(buffer, user_buf, user_len) != 0) {
917 		vfree(buffer);
918 		return ERR_PTR(-EFAULT);
919 	}
920 	/* got the string, now strip linefeed. */
921 	if (buffer[user_len - 1] == '\n')
922 		buffer[user_len - 1] = 0;
923 	else
924 		buffer[user_len] = 0;
925 	return buffer;
926 }
927 
928 static ssize_t dasd_stats_write(struct file *file,
929 				const char __user *user_buf,
930 				size_t user_len, loff_t *pos)
931 {
932 	char *buffer, *str;
933 	int rc;
934 	struct seq_file *m = (struct seq_file *)file->private_data;
935 	struct dasd_profile *prof = m->private;
936 
937 	if (user_len > 65536)
938 		user_len = 65536;
939 	buffer = dasd_get_user_string(user_buf, user_len);
940 	if (IS_ERR(buffer))
941 		return PTR_ERR(buffer);
942 
943 	str = skip_spaces(buffer);
944 	rc = user_len;
945 	if (strncmp(str, "reset", 5) == 0) {
946 		dasd_profile_reset(prof);
947 	} else if (strncmp(str, "on", 2) == 0) {
948 		rc = dasd_profile_on(prof);
949 		if (rc)
950 			goto out;
951 		rc = user_len;
952 		if (prof == &dasd_global_profile) {
953 			dasd_profile_reset(prof);
954 			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
955 		}
956 	} else if (strncmp(str, "off", 3) == 0) {
957 		if (prof == &dasd_global_profile)
958 			dasd_global_profile_level = DASD_PROFILE_OFF;
959 		dasd_profile_off(prof);
960 	} else
961 		rc = -EINVAL;
962 out:
963 	vfree(buffer);
964 	return rc;
965 }
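/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug: the
 * "statistics" files created by dasd_profile_init() below accept the
 * keywords parsed above, for example:
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 */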
966 
967 static void dasd_stats_array(struct seq_file *m, unsigned int *array)
968 {
969 	int i;
970 
971 	for (i = 0; i < 32; i++)
972 		seq_printf(m, "%u ", array[i]);
973 	seq_putc(m, '\n');
974 }
975 
976 static void dasd_stats_seq_print(struct seq_file *m,
977 				 struct dasd_profile_info *data)
978 {
979 	seq_printf(m, "start_time %lld.%09ld\n",
980 		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
981 	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
982 	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
983 	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
984 	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
985 	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
986 		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
987 	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
988 		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
989 	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
990 		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
991 	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
992 		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
993 	seq_puts(m, "histogram_sectors ");
994 	dasd_stats_array(m, data->dasd_io_secs);
995 	seq_puts(m, "histogram_io_times ");
996 	dasd_stats_array(m, data->dasd_io_times);
997 	seq_puts(m, "histogram_io_times_weighted ");
998 	dasd_stats_array(m, data->dasd_io_timps);
999 	seq_puts(m, "histogram_time_build_to_ssch ");
1000 	dasd_stats_array(m, data->dasd_io_time1);
1001 	seq_puts(m, "histogram_time_ssch_to_irq ");
1002 	dasd_stats_array(m, data->dasd_io_time2);
1003 	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
1004 	dasd_stats_array(m, data->dasd_io_time2ps);
1005 	seq_puts(m, "histogram_time_irq_to_end ");
1006 	dasd_stats_array(m, data->dasd_io_time3);
1007 	seq_puts(m, "histogram_ccw_queue_length ");
1008 	dasd_stats_array(m, data->dasd_io_nr_req);
1009 	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
1010 	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
1011 	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
1012 	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1013 	seq_puts(m, "histogram_read_sectors ");
1014 	dasd_stats_array(m, data->dasd_read_secs);
1015 	seq_puts(m, "histogram_read_times ");
1016 	dasd_stats_array(m, data->dasd_read_times);
1017 	seq_puts(m, "histogram_read_time_build_to_ssch ");
1018 	dasd_stats_array(m, data->dasd_read_time1);
1019 	seq_puts(m, "histogram_read_time_ssch_to_irq ");
1020 	dasd_stats_array(m, data->dasd_read_time2);
1021 	seq_puts(m, "histogram_read_time_irq_to_end ");
1022 	dasd_stats_array(m, data->dasd_read_time3);
1023 	seq_puts(m, "histogram_read_ccw_queue_length ");
1024 	dasd_stats_array(m, data->dasd_read_nr_req);
1025 }
1026 
1027 static int dasd_stats_show(struct seq_file *m, void *v)
1028 {
1029 	struct dasd_profile *profile;
1030 	struct dasd_profile_info *data;
1031 
1032 	profile = m->private;
1033 	spin_lock_bh(&profile->lock);
1034 	data = profile->data;
1035 	if (!data) {
1036 		spin_unlock_bh(&profile->lock);
1037 		seq_puts(m, "disabled\n");
1038 		return 0;
1039 	}
1040 	dasd_stats_seq_print(m, data);
1041 	spin_unlock_bh(&profile->lock);
1042 	return 0;
1043 }
1044 
1045 static int dasd_stats_open(struct inode *inode, struct file *file)
1046 {
1047 	struct dasd_profile *profile = inode->i_private;
1048 	return single_open(file, dasd_stats_show, profile);
1049 }
1050 
1051 static const struct file_operations dasd_stats_raw_fops = {
1052 	.owner		= THIS_MODULE,
1053 	.open		= dasd_stats_open,
1054 	.read		= seq_read,
1055 	.llseek		= seq_lseek,
1056 	.release	= single_release,
1057 	.write		= dasd_stats_write,
1058 };
1059 
1060 static void dasd_profile_init(struct dasd_profile *profile,
1061 			      struct dentry *base_dentry)
1062 {
1063 	umode_t mode;
1064 	struct dentry *pde;
1065 
1066 	if (!base_dentry)
1067 		return;
1068 	profile->dentry = NULL;
1069 	profile->data = NULL;
1070 	mode = (S_IRUSR | S_IWUSR | S_IFREG);
1071 	pde = debugfs_create_file("statistics", mode, base_dentry,
1072 				  profile, &dasd_stats_raw_fops);
1073 	if (pde && !IS_ERR(pde))
1074 		profile->dentry = pde;
1075 	return;
1076 }
1077 
1078 static void dasd_profile_exit(struct dasd_profile *profile)
1079 {
1080 	dasd_profile_off(profile);
1081 	debugfs_remove(profile->dentry);
1082 	profile->dentry = NULL;
1083 }
1084 
1085 static void dasd_statistics_removeroot(void)
1086 {
1087 	dasd_global_profile_level = DASD_PROFILE_OFF;
1088 	dasd_profile_exit(&dasd_global_profile);
1089 	debugfs_remove(dasd_debugfs_global_entry);
1090 	debugfs_remove(dasd_debugfs_root_entry);
1091 }
1092 
1093 static void dasd_statistics_createroot(void)
1094 {
1095 	struct dentry *pde;
1096 
1097 	dasd_debugfs_root_entry = NULL;
1098 	pde = debugfs_create_dir("dasd", NULL);
1099 	if (!pde || IS_ERR(pde))
1100 		goto error;
1101 	dasd_debugfs_root_entry = pde;
1102 	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1103 	if (!pde || IS_ERR(pde))
1104 		goto error;
1105 	dasd_debugfs_global_entry = pde;
1106 	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1107 	return;
1108 
1109 error:
1110 	DBF_EVENT(DBF_ERR, "%s",
1111 		  "Creation of the dasd debugfs interface failed");
1112 	dasd_statistics_removeroot();
1113 	return;
1114 }
1115 
1116 #else
1117 #define dasd_profile_start(block, cqr, req) do {} while (0)
1118 #define dasd_profile_end(block, cqr, req) do {} while (0)
1119 
1120 static void dasd_statistics_createroot(void)
1121 {
1122 	return;
1123 }
1124 
1125 static void dasd_statistics_removeroot(void)
1126 {
1127 	return;
1128 }
1129 
1130 int dasd_stats_generic_show(struct seq_file *m, void *v)
1131 {
1132 	seq_puts(m, "Statistics are not activated in this kernel\n");
1133 	return 0;
1134 }
1135 
1136 static void dasd_profile_init(struct dasd_profile *profile,
1137 			      struct dentry *base_dentry)
1138 {
1139 	return;
1140 }
1141 
1142 static void dasd_profile_exit(struct dasd_profile *profile)
1143 {
1144 	return;
1145 }
1146 
1147 int dasd_profile_on(struct dasd_profile *profile)
1148 {
1149 	return 0;
1150 }
1151 
1152 #endif				/* CONFIG_DASD_PROFILE */
1153 
1154 static int dasd_hosts_show(struct seq_file *m, void *v)
1155 {
1156 	struct dasd_device *device;
1157 	int rc = -EOPNOTSUPP;
1158 
1159 	device = m->private;
1160 	dasd_get_device(device);
1161 
1162 	if (device->discipline->hosts_print)
1163 		rc = device->discipline->hosts_print(device, m);
1164 
1165 	dasd_put_device(device);
1166 	return rc;
1167 }
1168 
1169 DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
1170 
1171 static void dasd_hosts_exit(struct dasd_device *device)
1172 {
1173 	debugfs_remove(device->hosts_dentry);
1174 	device->hosts_dentry = NULL;
1175 }
1176 
1177 static void dasd_hosts_init(struct dentry *base_dentry,
1178 			    struct dasd_device *device)
1179 {
1180 	struct dentry *pde;
1181 	umode_t mode;
1182 
1183 	if (!base_dentry)
1184 		return;
1185 
1186 	mode = S_IRUSR | S_IFREG;
1187 	pde = debugfs_create_file("host_access_list", mode, base_dentry,
1188 				  device, &dasd_hosts_fops);
1189 	if (pde && !IS_ERR(pde))
1190 		device->hosts_dentry = pde;
1191 }
1192 
1193 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
1194 					  struct dasd_device *device,
1195 					  struct dasd_ccw_req *cqr)
1196 {
1197 	unsigned long flags;
1198 	char *data, *chunk;
1199 	int size = 0;
1200 
1201 	if (cplength > 0)
1202 		size += cplength * sizeof(struct ccw1);
1203 	if (datasize > 0)
1204 		size += datasize;
1205 	if (!cqr)
1206 		size += (sizeof(*cqr) + 7L) & -8L;
1207 
1208 	spin_lock_irqsave(&device->mem_lock, flags);
1209 	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1210 	spin_unlock_irqrestore(&device->mem_lock, flags);
1211 	if (!chunk)
1212 		return ERR_PTR(-ENOMEM);
1213 	if (!cqr) {
1214 		cqr = (void *) data;
1215 		data += (sizeof(*cqr) + 7L) & -8L;
1216 	}
1217 	memset(cqr, 0, sizeof(*cqr));
1218 	cqr->mem_chunk = chunk;
1219 	if (cplength > 0) {
1220 		cqr->cpaddr = data;
1221 		data += cplength * sizeof(struct ccw1);
1222 		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1223 	}
1224 	if (datasize > 0) {
1225 		cqr->data = data;
1226 		memset(cqr->data, 0, datasize);
1227 	}
1228 	cqr->magic = magic;
1229 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1230 	dasd_get_device(device);
1231 	return cqr;
1232 }
1233 EXPORT_SYMBOL(dasd_smalloc_request);
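/*
 * The expression (sizeof(*cqr) + 7L) & -8L used above rounds the cqr
 * size up to the next multiple of 8 so that the ccw area placed behind
 * it stays 8-byte aligned; -8L is the two's complement form of ~7L.
 * Worked example (illustrative numbers): (52 + 7) & ~7 = 56, while a
 * size that is already a multiple of 8, such as 56, is left unchanged.
 */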
1234 
1235 struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
1236 					  int datasize,
1237 					  struct dasd_device *device)
1238 {
1239 	struct dasd_ccw_req *cqr;
1240 	unsigned long flags;
1241 	int size, cqr_size;
1242 	char *data;
1243 
1244 	cqr_size = (sizeof(*cqr) + 7L) & -8L;
1245 	size = cqr_size;
1246 	if (cplength > 0)
1247 		size += cplength * sizeof(struct ccw1);
1248 	if (datasize > 0)
1249 		size += datasize;
1250 
1251 	spin_lock_irqsave(&device->mem_lock, flags);
1252 	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1253 	spin_unlock_irqrestore(&device->mem_lock, flags);
1254 	if (!cqr)
1255 		return ERR_PTR(-ENOMEM);
1256 	memset(cqr, 0, sizeof(*cqr));
1257 	data = (char *)cqr + cqr_size;
1258 	cqr->cpaddr = NULL;
1259 	if (cplength > 0) {
1260 		cqr->cpaddr = data;
1261 		data += cplength * sizeof(struct ccw1);
1262 		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1263 	}
1264 	cqr->data = NULL;
1265 	if (datasize > 0) {
1266 		cqr->data = data;
1267 		memset(cqr->data, 0, datasize);
1268 	}
1269 
1270 	cqr->magic = magic;
1271 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1272 	dasd_get_device(device);
1273 
1274 	return cqr;
1275 }
1276 EXPORT_SYMBOL(dasd_fmalloc_request);
1277 
1278 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1279 {
1280 	unsigned long flags;
1281 
1282 	spin_lock_irqsave(&device->mem_lock, flags);
1283 	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1284 	spin_unlock_irqrestore(&device->mem_lock, flags);
1285 	dasd_put_device(device);
1286 }
1287 EXPORT_SYMBOL(dasd_sfree_request);
1288 
1289 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1290 {
1291 	unsigned long flags;
1292 
1293 	spin_lock_irqsave(&device->mem_lock, flags);
1294 	dasd_free_chunk(&device->ese_chunks, cqr);
1295 	spin_unlock_irqrestore(&device->mem_lock, flags);
1296 	dasd_put_device(device);
1297 }
1298 EXPORT_SYMBOL(dasd_ffree_request);
1299 
1300 /*
1301  * Check discipline magic in cqr.
1302  */
1303 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1304 {
1305 	struct dasd_device *device;
1306 
1307 	if (cqr == NULL)
1308 		return -EINVAL;
1309 	device = cqr->startdev;
1310 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1311 		DBF_DEV_EVENT(DBF_WARNING, device,
1312 			    " dasd_ccw_req 0x%08x magic doesn't match"
1313 			    " discipline 0x%08x",
1314 			    cqr->magic,
1315 			    *(unsigned int *) device->discipline->name);
1316 		return -EINVAL;
1317 	}
1318 	return 0;
1319 }
1320 
1321 /*
1322  * Terminate the current i/o and set the request to clear_pending.
1323  * Timer keeps device running.
1324  * ccw_device_clear can fail if the i/o subsystem
1325  * is in a bad mood.
1326  */
1327 int dasd_term_IO(struct dasd_ccw_req *cqr)
1328 {
1329 	struct dasd_device *device;
1330 	int retries, rc;
1331 	char errorstring[ERRORLENGTH];
1332 
1333 	/* Check the cqr */
1334 	rc = dasd_check_cqr(cqr);
1335 	if (rc)
1336 		return rc;
1337 	retries = 0;
1338 	device = (struct dasd_device *) cqr->startdev;
1339 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1340 		rc = ccw_device_clear(device->cdev, (long) cqr);
1341 		switch (rc) {
1342 		case 0:	/* termination successful */
1343 			cqr->status = DASD_CQR_CLEAR_PENDING;
1344 			cqr->stopclk = get_tod_clock();
1345 			cqr->starttime = 0;
1346 			DBF_DEV_EVENT(DBF_DEBUG, device,
1347 				      "terminate cqr %p successful",
1348 				      cqr);
1349 			break;
1350 		case -ENODEV:
1351 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1352 				      "device gone, retry");
1353 			break;
1354 		case -EINVAL:
1355 			/*
1356 			 * device not valid so no I/O could be running
1357 			 * handle CQR as termination successful
1358 			 */
1359 			cqr->status = DASD_CQR_CLEARED;
1360 			cqr->stopclk = get_tod_clock();
1361 			cqr->starttime = 0;
1362 			/* no retries for invalid devices */
1363 			cqr->retries = -1;
1364 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1365 				      "EINVAL, handle as terminated");
1366 			/* fake rc to success */
1367 			rc = 0;
1368 			break;
1369 		default:
1370 			/* internal error 10 - unknown rc */
1371 			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
1372 			dev_err(&device->cdev->dev, "An error occurred in the "
1373 				"DASD device driver, reason=%s\n", errorstring);
1374 			BUG();
1375 			break;
1376 		}
1377 		retries++;
1378 	}
1379 	dasd_schedule_device_bh(device);
1380 	return rc;
1381 }
1382 EXPORT_SYMBOL(dasd_term_IO);
1383 
1384 /*
1385  * Start the i/o. This start_IO can fail if the channel is really busy.
1386  * In that case set up a timer to start the request later.
1387  */
1388 int dasd_start_IO(struct dasd_ccw_req *cqr)
1389 {
1390 	struct dasd_device *device;
1391 	int rc;
1392 	char errorstring[ERRORLENGTH];
1393 
1394 	/* Check the cqr */
1395 	rc = dasd_check_cqr(cqr);
1396 	if (rc) {
1397 		cqr->intrc = rc;
1398 		return rc;
1399 	}
1400 	device = (struct dasd_device *) cqr->startdev;
1401 	if (((cqr->block &&
1402 	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1403 	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1404 	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1405 		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1406 			      "because of stolen lock", cqr);
1407 		cqr->status = DASD_CQR_ERROR;
1408 		cqr->intrc = -EPERM;
1409 		return -EPERM;
1410 	}
1411 	if (cqr->retries < 0) {
1412 		/* internal error 14 - start_IO run out of retries */
1413 		sprintf(errorstring, "14 %p", cqr);
1414 		dev_err(&device->cdev->dev, "An error occurred in the DASD "
1415 			"device driver, reason=%s\n", errorstring);
1416 		cqr->status = DASD_CQR_ERROR;
1417 		return -EIO;
1418 	}
1419 	cqr->startclk = get_tod_clock();
1420 	cqr->starttime = jiffies;
1421 	cqr->retries--;
1422 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1423 		cqr->lpm &= dasd_path_get_opm(device);
1424 		if (!cqr->lpm)
1425 			cqr->lpm = dasd_path_get_opm(device);
1426 	}
1427 	/*
1428 	 * remember the number of formatted tracks to prevent double format on
1429 	 * ESE devices
1430 	 */
1431 	if (cqr->block)
1432 		cqr->trkcount = atomic_read(&cqr->block->trkcount);
1433 
1434 	if (cqr->cpmode == 1) {
1435 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1436 					 (long) cqr, cqr->lpm);
1437 	} else {
1438 		rc = ccw_device_start(device->cdev, cqr->cpaddr,
1439 				      (long) cqr, cqr->lpm, 0);
1440 	}
1441 	switch (rc) {
1442 	case 0:
1443 		cqr->status = DASD_CQR_IN_IO;
1444 		break;
1445 	case -EBUSY:
1446 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1447 			      "start_IO: device busy, retry later");
1448 		break;
1449 	case -EACCES:
1450 		/* -EACCES indicates that the request used only a subset of the
1451 		 * available paths and all these paths are gone. If the lpm of
1452 		 * this request was only a subset of the opm (e.g. the ppm) then
1453 		 * we just do a retry with all available paths.
1454 		 * If we already use the full opm, something is amiss, and we
1455 		 * need a full path verification.
1456 		 */
1457 		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1458 			DBF_DEV_EVENT(DBF_WARNING, device,
1459 				      "start_IO: selected paths gone (%x)",
1460 				      cqr->lpm);
1461 		} else if (cqr->lpm != dasd_path_get_opm(device)) {
1462 			cqr->lpm = dasd_path_get_opm(device);
1463 			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1464 				      "start_IO: selected paths gone,"
1465 				      " retry on all paths");
1466 		} else {
1467 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1468 				      "start_IO: all paths in opm gone,"
1469 				      " do path verification");
1470 			dasd_generic_last_path_gone(device);
1471 			dasd_path_no_path(device);
1472 			dasd_path_set_tbvpm(device,
1473 					  ccw_device_get_path_mask(
1474 						  device->cdev));
1475 		}
1476 		break;
1477 	case -ENODEV:
1478 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1479 			      "start_IO: -ENODEV device gone, retry");
1480 		break;
1481 	case -EIO:
1482 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1483 			      "start_IO: -EIO device gone, retry");
1484 		break;
1485 	case -EINVAL:
1486 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1487 			      "start_IO: -EINVAL device currently "
1488 			      "not accessible");
1489 		break;
1490 	default:
1491 		/* internal error 11 - unknown rc */
1492 		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
1493 		dev_err(&device->cdev->dev,
1494 			"An error occurred in the DASD device driver, "
1495 			"reason=%s\n", errorstring);
1496 		BUG();
1497 		break;
1498 	}
1499 	cqr->intrc = rc;
1500 	return rc;
1501 }
1502 EXPORT_SYMBOL(dasd_start_IO);
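/*
 * Sketch of the retry described in the comment above dasd_start_IO()
 * (illustrative; the timeout value is an assumption, not taken from this
 * file): a caller seeing -EBUSY leaves the request queued and arms the
 * device timer so the request is started again later.
 *
 *	rc = dasd_start_IO(cqr);
 *	if (rc == -EBUSY)
 *		dasd_device_set_timer(device, HZ / 2);	// retry in ~0.5s
 */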
1503 
1504 /*
1505  * Timeout function for dasd devices. This is used for different purposes
1506  *  1) missing interrupt handler for normal operation
1507  *  2) delayed start of request where start_IO failed with -EBUSY
1508  *  3) timeout for missing state change interrupts
1509  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1510  * DASD_CQR_QUEUED for 2) and 3).
1511  */
1512 static void dasd_device_timeout(struct timer_list *t)
1513 {
1514 	unsigned long flags;
1515 	struct dasd_device *device;
1516 
1517 	device = from_timer(device, t, timer);
1518 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1519 	/* re-activate request queue */
1520 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1521 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1522 	dasd_schedule_device_bh(device);
1523 }
1524 
1525 /*
1526  * Setup timeout for a device in jiffies.
1527  */
1528 void dasd_device_set_timer(struct dasd_device *device, int expires)
1529 {
1530 	if (expires == 0)
1531 		del_timer(&device->timer);
1532 	else
1533 		mod_timer(&device->timer, jiffies + expires);
1534 }
1535 EXPORT_SYMBOL(dasd_device_set_timer);
1536 
1537 /*
1538  * Clear timeout for a device.
1539  */
1540 void dasd_device_clear_timer(struct dasd_device *device)
1541 {
1542 	del_timer(&device->timer);
1543 }
1544 EXPORT_SYMBOL(dasd_device_clear_timer);
1545 
1546 static void dasd_handle_killed_request(struct ccw_device *cdev,
1547 				       unsigned long intparm)
1548 {
1549 	struct dasd_ccw_req *cqr;
1550 	struct dasd_device *device;
1551 
1552 	if (!intparm)
1553 		return;
1554 	cqr = (struct dasd_ccw_req *) intparm;
1555 	if (cqr->status != DASD_CQR_IN_IO) {
1556 		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1557 				"invalid status in handle_killed_request: "
1558 				"%02x", cqr->status);
1559 		return;
1560 	}
1561 
1562 	device = dasd_device_from_cdev_locked(cdev);
1563 	if (IS_ERR(device)) {
1564 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1565 				"unable to get device from cdev");
1566 		return;
1567 	}
1568 
1569 	if (!cqr->startdev ||
1570 	    device != cqr->startdev ||
1571 	    strncmp(cqr->startdev->discipline->ebcname,
1572 		    (char *) &cqr->magic, 4)) {
1573 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1574 				"invalid device in request");
1575 		dasd_put_device(device);
1576 		return;
1577 	}
1578 
1579 	/* Schedule request to be retried. */
1580 	cqr->status = DASD_CQR_QUEUED;
1581 
1582 	dasd_device_clear_timer(device);
1583 	dasd_schedule_device_bh(device);
1584 	dasd_put_device(device);
1585 }
1586 
1587 void dasd_generic_handle_state_change(struct dasd_device *device)
1588 {
1589 	/* First of all start sense subsystem status request. */
1590 	dasd_eer_snss(device);
1591 
1592 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1593 	dasd_schedule_device_bh(device);
1594 	if (device->block) {
1595 		dasd_schedule_block_bh(device->block);
1596 		if (device->block->request_queue)
1597 			blk_mq_run_hw_queues(device->block->request_queue,
1598 					     true);
1599 	}
1600 }
1601 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1602 
1603 static int dasd_check_hpf_error(struct irb *irb)
1604 {
1605 	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1606 	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1607 	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1608 }
1609 
1610 static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
1611 {
1612 	struct dasd_device *device = NULL;
1613 	u8 *sense = NULL;
1614 
1615 	if (!block)
1616 		return 0;
1617 	device = block->base;
1618 	if (!device || !device->discipline->is_ese)
1619 		return 0;
1620 	if (!device->discipline->is_ese(device))
1621 		return 0;
1622 
1623 	sense = dasd_get_sense(irb);
1624 	if (!sense)
1625 		return 0;
1626 
1627 	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
1628 		!!(sense[1] & SNS1_FILE_PROTECTED) ||
1629 		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
1630 }
1631 
1632 static int dasd_ese_oos_cond(u8 *sense)
1633 {
1634 	return sense[0] & SNS0_EQUIPMENT_CHECK &&
1635 		sense[1] & SNS1_PERM_ERR &&
1636 		sense[1] & SNS1_WRITE_INHIBITED &&
1637 		sense[25] == 0x01;
1638 }
1639 
1640 /*
1641  * Interrupt handler for "normal" ssch-io based dasd devices.
1642  */
1643 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1644 		      struct irb *irb)
1645 {
1646 	struct dasd_ccw_req *cqr, *next, *fcqr;
1647 	struct dasd_device *device;
1648 	unsigned long now;
1649 	int nrf_suppressed = 0;
1650 	int fp_suppressed = 0;
1651 	struct request *req;
1652 	u8 *sense = NULL;
1653 	int expires;
1654 
1655 	cqr = (struct dasd_ccw_req *) intparm;
1656 	if (IS_ERR(irb)) {
1657 		switch (PTR_ERR(irb)) {
1658 		case -EIO:
1659 			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1660 				device = cqr->startdev;
1661 				cqr->status = DASD_CQR_CLEARED;
1662 				dasd_device_clear_timer(device);
1663 				wake_up(&dasd_flush_wq);
1664 				dasd_schedule_device_bh(device);
1665 				return;
1666 			}
1667 			break;
1668 		case -ETIMEDOUT:
1669 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1670 					"request timed out\n", __func__);
1671 			break;
1672 		default:
1673 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1674 					"unknown error %ld\n", __func__,
1675 					PTR_ERR(irb));
1676 		}
1677 		dasd_handle_killed_request(cdev, intparm);
1678 		return;
1679 	}
1680 
1681 	now = get_tod_clock();
1682 	/* check for conditions that should be handled immediately */
1683 	if (!cqr ||
1684 	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1685 	      scsw_cstat(&irb->scsw) == 0)) {
1686 		if (cqr)
1687 			memcpy(&cqr->irb, irb, sizeof(*irb));
1688 		device = dasd_device_from_cdev_locked(cdev);
1689 		if (IS_ERR(device))
1690 			return;
1691 		/* ignore unsolicited interrupts for DIAG discipline */
1692 		if (device->discipline == dasd_diag_discipline_pointer) {
1693 			dasd_put_device(device);
1694 			return;
1695 		}
1696 
		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected, and the debug log messages for the
		 * corresponding interrupts shouldn't be written in that case.
		 * Check if either of the corresponding suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		req = dasd_get_callback_data(cqr);
		if (!req) {
			cqr->status = DASD_CQR_ERROR;
			return;
		}
		if (rq_data_dir(req) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);
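
/*
 * Illustrative note (not functional code): the handler above is attached
 * per device in dasd_generic_probe() further below, roughly
 *
 *	cdev->handler = &dasd_int_handler;
 *
 * so the common I/O layer invokes it for every interrupt on a DASD
 * ccw device.
 */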

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	   device->state != device->target ||
	   !device->discipline->check_for_device_change){
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status */
		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_cqr(device, cqr);
			spin_unlock_bh(&block->queue_lock);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * dasd is being set offline, and it is not a safe offline
		 * where we would have to allow I/O
		 */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
			return 1;
		}
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			/* CQR is not able to change device to
			 * operational. */
			return 1;
		}
		/* CQR required to get device operational. */
	}
	return 0;
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* if device is not usable return request to upper layer */
	if (__dasd_device_is_unusable(device, cqr)) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

static void __dasd_device_check_path_events(struct dasd_device *device)
{
	__u8 tbvpm, fcsecpm;
	int rc;

	tbvpm = dasd_path_get_tbvpm(device);
	fcsecpm = dasd_path_get_fcsecpm(device);

	if (!tbvpm && !fcsecpm)
		return;

	if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
		return;
	rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
	if (rc) {
		dasd_device_set_timer(device, 50);
	} else {
		dasd_path_clear_all_verify(device);
		dasd_path_clear_all_fcsec(device);
	}
};

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device, all block layer requests must have been removed beforehand
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_tod_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(unsigned long data)
{
	struct dasd_device *device = (struct dasd_device *) data;
	struct list_head final_queue;

	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(device);
}

/*
 * Schedules a run of the device tasklet (dasd_device_tasklet) for this device.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_device_bh);

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
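
/*
 * Usage sketch (illustrative, not called in this file): a caller that
 * wants to pause I/O for a device and resume it later would typically
 * flip the stop bits under the ccw device lock and then kick the tasklet:
 *
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 *	...
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_remove_stop_bits(device, DASD_STOPPED_QUIESCE);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 *	dasd_schedule_device_bh(device);
 */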

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_head);

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_tail);

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}

static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENOLINK;
			continue;
		}
		/*
		 * Don't try to start requests if device is in
		 * offline processing, it might wait forever
		 */
		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENODEV;
			continue;
		}
		/*
		 * Don't try to start requests if device is stopped
		 * except path verification requests
		 */
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			if (interruptible) {
				rc = wait_event_interruptible(
					generic_waitq, !(device->stopped));
				if (rc == -ERESTARTSYS) {
					cqr->status = DASD_CQR_FAILED;
					maincqr->intrc = rc;
					continue;
				}
			} else
				wait_event(generic_waitq, !(device->stopped));
		}
		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;

		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_tod_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}

static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
	struct dasd_ccw_req *cqr;

	list_for_each_entry(cqr, ccw_queue, blocklist) {
		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
			return 0;
	}

	return 1;
}

static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr, *n;
	u8 *sense = NULL;
	int rc;

retry:
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		device = cqr->startdev;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    !dasd_eer_enabled(device)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EAGAIN;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !device->stopped);
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				cqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
	}

	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));

	rc = 0;
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and error recovery would be
		 * unnecessary in these cases. Check if the corresponding
		 * suppress bit is set.
		 */
		sense = dasd_get_sense(&cqr->irb);
		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
			continue;
		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
			continue;

		/*
		 * for alias devices simplify error recovery and
		 * return to upper layer
		 * do not skip ERP requests
		 */
		if (cqr->startdev != cqr->basedev && !cqr->refers &&
		    (cqr->status == DASD_CQR_TERMINATED ||
		     cqr->status == DASD_CQR_NEED_ERP))
			return -EAGAIN;

		/* normal recovery for basedev IO */
		if (__dasd_sleep_on_erp(cqr))
			/* handle erp first */
			goto retry;
	}

	return 0;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}
EXPORT_SYMBOL(dasd_sleep_on);
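
/*
 * Usage sketch (illustrative): a discipline that has built a cqr (how the
 * channel program is constructed is discipline specific and not shown
 * here) can execute it synchronously like this:
 *
 *	cqr->startdev = device;
 *	cqr->memdev = device;
 *	cqr->retries = 5;
 *	cqr->buildclk = get_tod_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);
 *
 * The return value is 0 when the request ended in DASD_CQR_DONE,
 * otherwise cqr->intrc if set, or -EIO.
 */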

/*
 * Start requests from a ccw_queue and wait for their completion.
 */
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);
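
/*
 * Usage sketch (illustrative): to run several requests in one go, chain
 * them onto a local list via their blocklist member and wait for all of
 * them at once:
 *
 *	LIST_HEAD(ccw_queue);
 *
 *	list_add_tail(&cqr->blocklist, &ccw_queue);
 *	...
 *	rc = dasd_sleep_on_queue(&ccw_queue);
 */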

/*
 * Start requests from a ccw_queue and wait interruptible for their completion.
 */
int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_interruptible);

/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and put back to status queued before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add the new request as second on the queue;
	 * the terminated cqr needs to be finished first
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;

	/* kick tasklets */
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);

	return rc;
}
EXPORT_SYMBOL(dasd_sleep_on_immediatly);

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 0 if request termination was successful
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	int rc = 0;

	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_tod_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	rc = __dasd_cancel_req(cqr);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
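
/*
 * Usage sketch (illustrative): because cancellation is asynchronous, a
 * caller that installed dasd_wakeup_cb as callback still has to wait for
 * the final status after cancelling, as _dasd_sleep_on() above does:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */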

/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_block *block;

	block = from_timer(block, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
	blk_mq_run_hw_queues(block->request_queue, true);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_block_set_timer);
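
/*
 * Usage sketch (illustrative): expires is given in jiffies, so arming the
 * block timer for half a second and cancelling it again looks like this:
 *
 *	dasd_block_set_timer(block, HZ / 2);
 *	...
 *	dasd_block_set_timer(block, 0);
 *
 * where passing 0 is equivalent to dasd_block_clear_timer() below.
 */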

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}
EXPORT_SYMBOL(dasd_block_clear_timer);

/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	blk_status_t error = BLK_STS_OK;
	unsigned int proc_bytes;
	int status;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);

	proc_bytes = cqr->proc_bytes;
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status < 0)
		error = errno_to_blk_status(status);
	else if (status == 0) {
		switch (cqr->intrc) {
		case -EPERM:
			error = BLK_STS_NEXUS;
			break;
		case -ENOLINK:
			error = BLK_STS_TRANSPORT;
			break;
		case -ETIMEDOUT:
			error = BLK_STS_TIMEOUT;
			break;
		default:
			error = BLK_STS_IOERR;
			break;
		}
	}

	/*
	 * We need to take care for ETIMEDOUT errors here since the
	 * complete callback does not get called in this case.
	 * Take care of all errors here and avoid additional code to
	 * transfer the error value to the complete callback.
	 */
	if (error) {
		blk_mq_end_request(req, error);
		blk_mq_run_hw_queues(req->q, true);
	} else {
		/*
		 * Partial completed requests can happen with ESE devices.
		 * During read we might have gotten a NRF error and have to
		 * complete a request partially.
		 */
		if (proc_bytes) {
			blk_update_request(req, BLK_STS_OK, proc_bytes);
			blk_mq_requeue_request(req, true);
		} else if (likely(!blk_should_fake_timeout(req->q))) {
			blk_mq_complete_request(req);
		}
	}
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_tod_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENOLINK;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(unsigned long data)
{
	struct dasd_block *block = (struct dasd_block *) data;
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_queue *dq;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock_irq(&block->queue_lock);

	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		dq = cqr->dq;
		spin_lock_irq(&dq->lock);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&dq->lock);
	}

	spin_lock_irq(&block->queue_lock);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock_irq(&block->queue_lock);

	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Requeue a request back to the block request queue
 * only works for block requests
 */
static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
	struct request *req;

	/*
	 * If the request is an ERP request there is nothing to requeue.
	 * This will be done with the remaining original request.
	 */
	if (cqr->refers)
		return;
	spin_lock_irq(&cqr->dq->lock);
	req = (struct request *) cqr->callback_data;
	blk_mq_requeue_request(req, true);
	spin_unlock_irq(&cqr->dq->lock);

	return;
}

static int _dasd_requests_to_flushqueue(struct dasd_block *block,
					struct list_head *flush_queue)
{
	struct dasd_ccw_req *cqr, *n;
	unsigned long flags;
	int rc, i;

	spin_lock_irqsave(&block->queue_lock, flags);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_irqrestore(&block->queue_lock, flags);

	return rc;
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	struct list_head flush_queue;
	unsigned long flags;
	int rc;

	INIT_LIST_HEAD(&flush_queue);
	rc = _dasd_requests_to_flushqueue(block, &flush_queue);

	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irqsave(&cqr->dq->lock, flags);
		cqr->endclk = get_tod_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
	}
	return rc;
}

/*
 * Schedules a run of the block tasklet (dasd_block_tasklet) for this block.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_block_bh);


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * DASD request queue function. Called from the generic block layer (blk-mq).
 */
static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct dasd_block *block = hctx->queue->queuedata;
	struct dasd_queue *dq = hctx->driver_data;
	struct request *req = qd->rq;
	struct dasd_device *basedev;
	struct dasd_ccw_req *cqr;
	blk_status_t rc = BLK_STS_OK;

	basedev = block->base;
	spin_lock_irq(&dq->lock);
	if (basedev->state < DASD_STATE_READY ||
	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device not ready for request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	/*
	 * if device is stopped do not fetch new requests
	 * except failfast is active which will let requests fail
	 * immediately in __dasd_block_start_head()
	 */
	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device stopped request %p", req);
		rc = BLK_STS_RESOURCE;
		goto out;
	}

	if (basedev->features & DASD_FEATURE_READONLY &&
	    rq_data_dir(req) == WRITE) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting write request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
	    (basedev->features & DASD_FEATURE_FAILFAST ||
	     blk_noretry_request(req))) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting failfast request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	cqr = basedev->discipline->build_cp(basedev, block, req);
	if (IS_ERR(cqr)) {
		if (PTR_ERR(cqr) == -EBUSY ||
		    PTR_ERR(cqr) == -ENOMEM ||
		    PTR_ERR(cqr) == -EAGAIN) {
			rc = BLK_STS_RESOURCE;
			goto out;
		}
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "CCW creation failed (rc=%ld) on request %p",
			      PTR_ERR(cqr), req);
		rc = BLK_STS_IOERR;
		goto out;
	}
	/*
	 * Note: callback is set to dasd_return_cqr_cb in
	 * __dasd_block_start_head to cover erp requests as well
	 */
	cqr->callback_data = req;
	cqr->status = DASD_CQR_FILLED;
	cqr->dq = dq;

	blk_mq_start_request(req);
	spin_lock(&block->queue_lock);
	list_add_tail(&cqr->blocklist, &block->ccw_queue);
	INIT_LIST_HEAD(&cqr->devlist);
	dasd_profile_start(block, cqr, req);
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);

out:
	spin_unlock_irq(&dq->lock);
	return rc;
}

/*
 * Block timeout callback, called from the block layer
 *
 * Return values:
 * BLK_EH_RESET_TIMER if the request should be left running
 * BLK_EH_DONE if the request is handled or terminated
 *	       by the driver.
 */
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
	struct dasd_block *block = req->q->queuedata;
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int rc = 0;

	cqr = blk_mq_rq_to_pdu(req);
	if (!cqr)
		return BLK_EH_DONE;

	spin_lock_irqsave(&cqr->dq->lock, flags);
	device = cqr->startdev ? cqr->startdev : block->base;
	if (!device->blk_timeout) {
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
		return BLK_EH_RESET_TIMER;
	}
	DBF_DEV_EVENT(DBF_WARNING, device,
		      " dasd_times_out cqr %p status %x",
		      cqr, cqr->status);

	spin_lock(&block->queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	cqr->retries = -1;
	cqr->intrc = -ETIMEDOUT;
	if (cqr->status >= DASD_CQR_QUEUED) {
		rc = __dasd_cancel_req(cqr);
	} else if (cqr->status == DASD_CQR_FILLED ||
		   cqr->status == DASD_CQR_NEED_ERP) {
		cqr->status = DASD_CQR_TERMINATED;
	} else if (cqr->status == DASD_CQR_IN_ERP) {
		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;

		list_for_each_entry_safe(searchcqr, nextcqr,
					 &block->ccw_queue, blocklist) {
			tmpcqr = searchcqr;
			while (tmpcqr->refers)
				tmpcqr = tmpcqr->refers;
			if (tmpcqr != cqr)
				continue;
			/* searchcqr is an ERP request for cqr */
			searchcqr->retries = -1;
			searchcqr->intrc = -ETIMEDOUT;
			if (searchcqr->status >= DASD_CQR_QUEUED) {
				rc = __dasd_cancel_req(searchcqr);
			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
				searchcqr->status = DASD_CQR_TERMINATED;
				rc = 0;
			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
				/*
				 * Shouldn't happen; most recent ERP
				 * request is at the front of queue
				 */
				continue;
			}
			break;
		}
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&cqr->dq->lock, flags);

	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}

static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int idx)
{
	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

	if (!dq)
		return -ENOMEM;

	spin_lock_init(&dq->lock);
	hctx->driver_data = dq;

	return 0;
}

static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void dasd_request_done(struct request *req)
{
	blk_mq_end_request(req, 0);
	blk_mq_run_hw_queues(req->q, true);
}

static struct blk_mq_ops dasd_mq_ops = {
	.queue_rq = do_dasd_request,
	.complete = dasd_request_done,
	.timeout = dasd_times_out,
	.init_hctx = dasd_init_hctx,
	.exit_hctx = dasd_exit_hctx,
};

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->tag_set.ops = &dasd_mq_ops;
	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
	block->tag_set.nr_hw_queues = nr_hw_queues;
	block->tag_set.queue_depth = queue_depth;
	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	block->tag_set.numa_node = NUMA_NO_NODE;

	rc = blk_mq_alloc_tag_set(&block->tag_set);
	if (rc)
		return rc;

	block->request_queue = blk_mq_init_queue(&block->tag_set);
	if (IS_ERR(block->request_queue))
		return PTR_ERR(block->request_queue);

	block->request_queue->queuedata = block;

	return 0;
}
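
/*
 * Illustrative note: nr_hw_queues and queue_depth above are the module
 * parameters declared at the top of this file, so something like
 *
 *	modprobe dasd_mod nr_hw_queues=8 queue_depth=64
 *
 * (a sketch; the exact module setup may differ) changes the tag set
 * geometry for all DASD block devices created afterwards.
 */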
3285 
3286 /*
3287  * Deactivate and free request queue.
3288  */
dasd_free_queue(struct dasd_block * block)3289 static void dasd_free_queue(struct dasd_block *block)
3290 {
3291 	if (block->request_queue) {
3292 		blk_cleanup_queue(block->request_queue);
3293 		blk_mq_free_tag_set(&block->tag_set);
3294 		block->request_queue = NULL;
3295 	}
3296 }
3297 
dasd_open(struct block_device * bdev,fmode_t mode)3298 static int dasd_open(struct block_device *bdev, fmode_t mode)
3299 {
3300 	struct dasd_device *base;
3301 	int rc;
3302 
3303 	base = dasd_device_from_gendisk(bdev->bd_disk);
3304 	if (!base)
3305 		return -ENODEV;
3306 
3307 	atomic_inc(&base->block->open_count);
3308 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3309 		rc = -ENODEV;
3310 		goto unlock;
3311 	}
3312 
3313 	if (!try_module_get(base->discipline->owner)) {
3314 		rc = -EINVAL;
3315 		goto unlock;
3316 	}
3317 
3318 	if (dasd_probeonly) {
3319 		dev_info(&base->cdev->dev,
3320 			 "Accessing the DASD failed because it is in "
3321 			 "probeonly mode\n");
3322 		rc = -EPERM;
3323 		goto out;
3324 	}
3325 
3326 	if (base->state <= DASD_STATE_BASIC) {
3327 		DBF_DEV_EVENT(DBF_ERR, base, " %s",
3328 			      " Cannot open unrecognized device");
3329 		rc = -ENODEV;
3330 		goto out;
3331 	}
3332 
3333 	if ((mode & FMODE_WRITE) &&
3334 	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3335 	     (base->features & DASD_FEATURE_READONLY))) {
3336 		rc = -EROFS;
3337 		goto out;
3338 	}
3339 
3340 	dasd_put_device(base);
3341 	return 0;
3342 
3343 out:
3344 	module_put(base->discipline->owner);
3345 unlock:
3346 	atomic_dec(&base->block->open_count);
3347 	dasd_put_device(base);
3348 	return rc;
3349 }
3350 
dasd_release(struct gendisk * disk,fmode_t mode)3351 static void dasd_release(struct gendisk *disk, fmode_t mode)
3352 {
3353 	struct dasd_device *base = dasd_device_from_gendisk(disk);
3354 	if (base) {
3355 		atomic_dec(&base->block->open_count);
3356 		module_put(base->discipline->owner);
3357 		dasd_put_device(base);
3358 	}
3359 }

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *base;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	if (!base->discipline ||
	    !base->discipline->fill_geometry) {
		dasd_put_device(base);
		return -EINVAL;
	}
	base->discipline->fill_geometry(base->block, geo);
	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
	dasd_put_device(base);
	return 0;
}
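
/*
 * s2b_shift converts between 512 byte Linux sectors and the device
 * block size. For a DASD formatted with 4096 byte blocks, s2b_shift
 * is 3, so a partition beginning at sector 24 is reported as starting
 * at device block 24 >> 3 = 3.
 */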

const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
	.set_read_only	= dasd_set_read_only,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	kmem_cache_destroy(dasd_page_cache);
	dasd_page_cache = NULL;
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
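
/*
 * diag210 is the z/VM hypervisor call that retrieves virtual device
 * information: vrdcdvno selects the device number to query, and for
 * the return codes treated as valid here (0 and 2) bit 0x80 of the
 * vrdcvfla flag byte indicates that the device is read-only in z/VM.
 */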

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
			dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev)
{
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);
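
/*
 * Onlining goes through async_schedule() because ccw_device_set_online()
 * performs the full device bring-up synchronously; deferring it keeps a
 * slow device from serializing probe-time detection of all the others.
 */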

void dasd_generic_free_discipline(struct dasd_device *device)
{
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
		device->discipline = NULL;
	}
	if (device->base_discipline) {
		module_put(device->base_discipline->owner);
		device->base_discipline = NULL;
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	cdev->handler = NULL;
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * The life cycle of the block is bound to the device, so delete
	 * it after the device was safely removed.
	 */
	if (block)
		dasd_free_block(block);
}
EXPORT_SYMBOL_GPL(dasd_generic_remove);
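
/*
 * test_and_set_bit() makes DASD_FLAG_OFFLINE the single point of
 * serialization between concurrent remove/offline callers: whoever sets
 * the bit first owns the teardown, unless a safe offline is already in
 * flight, in which case the unconditional remove takes over.
 */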

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			/* Try to load the required module. */
			rc = request_module(DASD_DIAG_MOD);
			if (rc) {
				pr_warn("%s Setting the DASD online failed "
					"because the required module %s "
					"could not be loaded (rc=%d)\n",
					dev_name(&cdev->dev), DASD_DIAG_MOD,
					rc);
				dasd_delete_device(device);
				return -ENODEV;
			}
		}
		/* Module init could have failed, so check again here after
		 * request_module(). */
		if (!dasd_diag_discipline_pointer) {
			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
				dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
			dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
			dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else {
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));
	}

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
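
/*
 * Reference pairing: the two try_module_get() calls above (base
 * discipline, plus the DIAG discipline when DASD_FEATURE_USEDIAG is
 * set) are undone by the two module_put() calls in
 * dasd_generic_free_discipline() once the device goes away again.
 */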

int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count, rc;
	unsigned long flags;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return PTR_ERR(device);
	}

	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, which includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
					dev_name(&cdev->dev), open_count);
			else
				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
					dev_name(&cdev->dev));
			rc = -EBUSY;
			goto out_err;
		}
	}

	/*
	 * Test if the offline processing is already running and exit if so.
	 * If a safe offline is being processed, the only caller that can get
	 * here is a normal offline, which is allowed to overtake the safe
	 * offline and cancel any I/O we no longer want to wait for.
	 */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
				  &device->flags);
		} else {
			rc = -EBUSY;
			goto out_err;
		}
	}
	set_bit(DASD_FLAG_OFFLINE, &device->flags);

	/*
	 * If safe_offline is called, set the safe_offline_running flag and
	 * clear safe_offline, so that a call to normal offline can overtake
	 * the safe offline processing.
	 */
	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* need to unlock here to wait for outstanding I/O */
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		/*
		 * If we want to set the device safe offline, all I/O
		 * operations should be finished before continuing the
		 * offline process, so sync the bdev first and then wait
		 * for our queues to become empty.
		 */
		if (device->block) {
			rc = fsync_bdev(device->block->bdev);
			if (rc != 0)
				goto interrupted;
		}
		dasd_schedule_device_bh(device);
		rc = wait_event_interruptible(shutdown_waitq,
					      _wait_for_empty_queues(device));
		if (rc != 0)
			goto interrupted;

		/*
		 * Check if a normal offline process overtook the offline
		 * processing. In that case simply do nothing besides
		 * returning that we got interrupted. Otherwise mark safe
		 * offline as not running any longer and continue with
		 * normal offline.
		 */
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			rc = -ERESTARTSYS;
			goto out_err;
		}
		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * The life cycle of the block is bound to the device, so delete
	 * it after the device was safely removed.
	 */
	if (block)
		dasd_free_block(block);

	return 0;

interrupted:
	/* interrupted by signal */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
out_err:
	dasd_put_device(device);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
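
/*
 * Safe offline in short: DASD_FLAG_SAFE_OFFLINE is the user's request,
 * DASD_FLAG_SAFE_OFFLINE_RUNNING marks the wait-for-I/O phase, and a
 * plain offline may cancel that phase at any time by clearing the
 * RUNNING bit, which the waiter detects as -ERESTARTSYS after waking up.
 */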

int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
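
/*
 * Setting DASD_CQR_IN_IO/CLEAR_PENDING requests back to DASD_CQR_QUEUED
 * (with an extra retry) keeps them on the device queue, so they are
 * restarted automatically once dasd_generic_path_operational() below
 * lifts the DASD_STOPPED_DC_WAIT stop bit again.
 */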

int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	if (!device->stopped)
		wake_up(&generic_waitq);

	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		dasd_path_no_path(device);
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (dasd_path_get_opm(device))
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_notify);

void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE)
			dasd_path_notoper(device, chp);
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
		if (path_event[chp] & PE_PATH_FCES_EVENT) {
			dasd_path_fcsec_update(device, chp);
			dasd_schedule_device_bh(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one
		 * path is disabled due to HPF errors.
		 * Disable HPF altogether and use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one
		 * path is disabled due to IFCC errors.
		 * Trigger path verification on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
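
/*
 * Recovery ordering when no operational path is left: HPF-disabled
 * paths are reclaimed first (HPF is switched off entirely and the
 * stopped device is requeued), IFCC-disabled paths merely get a fresh
 * path verification, and only if neither pool can help is the device
 * put into DASD_STOPPED_DC_WAIT with an EER notification.
 */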

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else {
		dasd_path_add_opm(device, lpm);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
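
/*
 * dasd_generic_space_exhaust() and dasd_generic_space_avail() form a
 * pair around DASD_STOPPED_NOSPC: the first parks the device (and the
 * failed request) until the extent pool grows, the second releases the
 * stop bit and kicks both the device and the block layer queues again.
 */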

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	int rc;

	if (!block)
		return 0;

	INIT_LIST_HEAD(&requeue_queue);
	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);

	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/*
			 * Restart the list_for_each_entry_safe loop, since
			 * __dasd_process_erp() might remove multiple elements.
			 */
			goto restart_cb;
		}
		_dasd_requeue_request(cqr);
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}
	dasd_schedule_device_bh(device);
	return rc;
}
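
/*
 * The wait_event() above holds each request back until it has reached a
 * final state (the final status values sort below DASD_CQR_QUEUED), so
 * a request is never handed back to the block layer while the channel
 * subsystem could still be working on it.
 */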

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* Queue a call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
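
/*
 * The channel program built above is a single Read Device
 * Characteristics CCW: cmd_code CCW_CMD_RDC, data address pointing at
 * the cqr payload, and count limiting the transfer to rdc_buffer_size
 * bytes, retried up to 256 times with a 10 second expiry.
 */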

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
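
/*
 * Typical use from a discipline driver (a sketch with a hypothetical
 * characteristics structure and magic; the real layout and magic are
 * discipline-specific):
 *
 *	struct dasd_xxx_characteristics rdc_data;
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_XXX_MAGIC,
 *					 &rdc_data, sizeof(rdc_data));
 *	if (rc)
 *		return rc;
 */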

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
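
/*
 * tsb->flags & 0x07 selects which member of the tsa union is valid:
 * 1 selects the I/O status area (tsa_iostat), 2 the ddpc area; both
 * carry the 32 byte sense array, which is why a plain char pointer
 * can serve command and transport mode alike.
 */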

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}
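
/*
 * The failure path above relies on dasd_exit() tolerating partially
 * completed initialization: the debug area is only unregistered if it
 * was created, and kmem_cache_destroy() accepts a NULL cache pointer,
 * so bailing out at any step leaves nothing dangling.
 */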

module_init(dasd_init);
module_exit(dasd_exit);