• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  drivers/s390/char/tape_block.c
3  *    block device frontend for tape device driver
4  *
5  *  S390 and zSeries version
6  *    Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8  *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
9  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
10  *		 Stefan Bader <shbader@de.ibm.com>
11  */
12 
13 #include <linux/fs.h>
14 #include <linux/module.h>
15 #include <linux/blkdev.h>
16 #include <linux/interrupt.h>
17 #include <linux/buffer_head.h>
18 #include <linux/kernel.h>
19 
#include <asm/debug.h>

/* Select the debug feature area used by the DBF_* macros from tape.h. */
#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"

/* Prefix for PRINT_INFO/PRINT_WARN/PRINT_ERR messages of this frontend. */
#define PRINTK_HEADER "TAPE_BLOCK: "

/* Request size limit passed to blk_queue_max_sectors() in setup_device. */
#define TAPEBLOCK_MAX_SEC	100
/* Target number of requests kept on the tape device ccw queue (requeue). */
#define TAPEBLOCK_MIN_REQUEUE	3
30 
31 /*
32  * 2003/11/25  Stefan Bader <shbader@de.ibm.com>
33  *
34  * In 2.5/2.6 the block device request function is very likely to be called
35  * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
36  * just call any function that tries to allocate CCW requests from that con-
37  * text since it might sleep. There are two choices to work around this:
38  *	a) do not allocate with kmalloc but use its own memory pool
39  *      b) take requests from the queue outside that context, knowing that
40  *         allocation might sleep
41  */
42 
/*
 * File operation structure for the tape block frontend; forward
 * declarations for the handlers defined below.
 */
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
				unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);

static struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.locked_ioctl           = tapeblock_ioctl,
	.media_changed   = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

/* Block major number; 0 requests dynamic allocation (see tapeblock_init). */
static int tapeblock_major = 0;
63 
64 static void
tapeblock_trigger_requeue(struct tape_device * device)65 tapeblock_trigger_requeue(struct tape_device *device)
66 {
67 	/* Protect against rescheduling. */
68 	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
69 		return;
70 	schedule_work(&device->blk_data.requeue_task);
71 }
72 
/*
 * Post finished request.
 *
 * The whole request is completed in one go; blk_end_request() returning
 * nonzero would mean bytes were left over, which must not happen here.
 */
static void
tapeblock_end_request(struct request *req, int error)
{
	int leftover;

	leftover = blk_end_request(req, error, blk_rq_bytes(req));
	if (leftover)
		BUG();
}
82 
/*
 * Callback invoked by the tape core when the channel program built for a
 * block request has finished.  Completes the block layer request, updates
 * the cached tape position (or invalidates it on error), frees the CCW
 * request and re-arms the requeue worker if more work is pending.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	/* callback_data was set to the request in tapeblock_start_request(). */
	req = (struct request *) data;
	tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	/* More work queued on either the ccw queue or the block queue? */
	if (!list_empty(&device->req_queue) ||
	    elv_next_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}
106 
107 /*
108  * Feed the tape device CCW queue with requests supplied in a list.
109  */
110 static int
tapeblock_start_request(struct tape_device * device,struct request * req)111 tapeblock_start_request(struct tape_device *device, struct request *req)
112 {
113 	struct tape_request *	ccw_req;
114 	int			rc;
115 
116 	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
117 
118 	ccw_req = device->discipline->bread(device, req);
119 	if (IS_ERR(ccw_req)) {
120 		DBF_EVENT(1, "TBLOCK: bread failed\n");
121 		tapeblock_end_request(req, -EIO);
122 		return PTR_ERR(ccw_req);
123 	}
124 	ccw_req->callback = __tapeblock_end_request;
125 	ccw_req->callback_data = (void *) req;
126 	ccw_req->retries = TAPEBLOCK_RETRIES;
127 
128 	rc = tape_do_io_async(device, ccw_req);
129 	if (rc) {
130 		/*
131 		 * Start/enqueueing failed. No retries in
132 		 * this case.
133 		 */
134 		tapeblock_end_request(req, -EIO);
135 		device->discipline->free_bread(ccw_req);
136 	}
137 
138 	return rc;
139 }
140 
/*
 * Move requests from the block device request queue to the tape device ccw
 * queue.
 *
 * Runs from the workqueue (scheduled by tapeblock_trigger_requeue), i.e.
 * in process context where the discipline's bread() allocation may sleep
 * -- see the comment at the top of this file.
 */
static void
tapeblock_requeue(struct work_struct *work) {
	struct tape_blk_data *	blkdat;
	struct tape_device *	device;
	struct request_queue *	queue;
	int			nr_queued;
	struct request *	req;
	struct list_head *	l;
	int			rc;

	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	queue  = device->blk_data.request_queue;

	/* Count number of requests on ccw queue. */
	nr_queued = 0;
	list_for_each(l, &device->req_queue)
		nr_queued++;
	/*
	 * NOTE(review): unlock without re-enabling interrupts; the
	 * spin_lock_irq() below disables them again anyway -- confirm this
	 * asymmetry is intentional.
	 */
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock_irq(&device->blk_data.request_queue_lock);
	/* Keep at most TAPEBLOCK_MIN_REQUEUE requests on the ccw queue. */
	while (
		!blk_queue_plugged(queue) &&
		elv_next_request(queue)   &&
		nr_queued < TAPEBLOCK_MIN_REQUEUE
	) {
		req = elv_next_request(queue);
		if (rq_data_dir(req) == WRITE) {
			/* This frontend only services reads. */
			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
			blkdev_dequeue_request(req);
			/* Drop the queue lock while completing the request. */
			spin_unlock_irq(&device->blk_data.request_queue_lock);
			tapeblock_end_request(req, -EIO);
			spin_lock_irq(&device->blk_data.request_queue_lock);
			continue;
		}
		blkdev_dequeue_request(req);
		nr_queued++;
		/* start_request may sleep in bread(); release the lock. */
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	/* Re-open the latch so tapeblock_trigger_requeue() can fire again. */
	atomic_set(&device->blk_data.requeue_scheduled, 0);
}
193 
194 /*
195  * Tape request queue function. Called from ll_rw_blk.c
196  */
197 static void
tapeblock_request_fn(struct request_queue * queue)198 tapeblock_request_fn(struct request_queue *queue)
199 {
200 	struct tape_device *device;
201 
202 	device = (struct tape_device *) queue->queuedata;
203 	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
204 	BUG_ON(device == NULL);
205 	tapeblock_trigger_requeue(device);
206 }
207 
/*
 * This function is called for every new tapedevice.
 *
 * Sets up the block device frontend for <device>: a request queue with
 * the noop elevator, the queue limits, and a gendisk which is then
 * registered.  Returns 0 on success or a negative errno.
 */
int
tapeblock_setup_device(struct tape_device * device)
{
	struct tape_blk_data *	blkdat;
	struct gendisk *	disk;
	int			rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	/* Replace the default elevator; reordering buys nothing on tape. */
	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	/* -1L: effectively "no limit" for segment counts, size, boundary. */
	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	/* The disk keeps its own device reference in private_data. */
	disk->private_data = tape_get_device_reference(device);
	disk->queue = blkdat->request_queue;
	/* Capacity is unknown until tapeblock_revalidate_disk() runs. */
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);

	blkdat->disk = disk;
	blkdat->medium_changed = 1;
	/* A second reference is stored in queue->queuedata. */
	blkdat->request_queue->queuedata = tape_get_device_reference(device);

	add_disk(disk);

	/* Extra reference, presumably dropped in tapeblock_cleanup_device(). */
	tape_get_device_reference(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}
274 
/*
 * Tear down the block device frontend of <device>; counterpart to
 * tapeblock_setup_device().
 */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	/* Make sure no requeue work item is still running. */
	flush_scheduled_work();
	/* Drop the extra reference taken at the end of setup_device. */
	tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
			dev_name(&device->cdev->dev));
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	/* Release the reference stored in disk->private_data. */
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	/* Release the reference stored in queue->queuedata. */
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
299 
/*
 * Detect number of blocks of the tape.
 * FIXME: can we extend this to detect the block size as well ?
 *
 * Spaces forward to the filemark that ends the image file (MTFSFM),
 * reads the block number there (MTTELL), seeks back to the start of
 * the file (MTBSF) and reads the position again to compute the number
 * of usable blocks, then sets the disk capacity.  Only runs when the
 * medium changed.
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *	device;
	unsigned int		nr_of_blks;
	int			rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	/* Capacity still valid; nothing to do. */
	if (!device->blk_data.medium_changed)
		return 0;

	PRINT_INFO("Detecting media size...\n");
	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	/* Start position beyond the end mark: inconsistent information. */
	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
	/* set_capacity() counts 512-byte sectors, hence the scaling. */
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
352 
353 static int
tapeblock_medium_changed(struct gendisk * disk)354 tapeblock_medium_changed(struct gendisk *disk)
355 {
356 	struct tape_device *device;
357 
358 	device = (struct tape_device *) disk->private_data;
359 	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
360 		device, device->blk_data.medium_changed);
361 
362 	return device->blk_data.medium_changed;
363 }
364 
365 /*
366  * Block frontend tape device open function.
367  */
368 static int
tapeblock_open(struct block_device * bdev,fmode_t mode)369 tapeblock_open(struct block_device *bdev, fmode_t mode)
370 {
371 	struct gendisk *	disk = bdev->bd_disk;
372 	struct tape_device *	device;
373 	int			rc;
374 
375 	device = tape_get_device_reference(disk->private_data);
376 
377 	if (device->required_tapemarks) {
378 		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
379 		PRINT_ERR("TBLOCK: Refusing to open tape with missing"
380 			" end of file marks.\n");
381 		rc = -EPERM;
382 		goto put_device;
383 	}
384 
385 	rc = tape_open(device);
386 	if (rc)
387 		goto put_device;
388 
389 	rc = tapeblock_revalidate_disk(disk);
390 	if (rc)
391 		goto release;
392 
393 	/*
394 	 * Note: The reference to <device> is hold until the release function
395 	 *       is called.
396 	 */
397 	tape_state_set(device, TS_BLKUSE);
398 	return 0;
399 
400 release:
401 	tape_release(device);
402  put_device:
403 	tape_put_device(device);
404 	return rc;
405 }
406 
/*
 * Block frontend tape device release function.
 *
 * Note: One reference to the tape device was made by the open function. So
 *       we just get the pointer here and release the reference.
 */
static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
	struct tape_device *device = disk->private_data;

	/* Leave TS_BLKUSE before releasing the device. */
	tape_state_set(device, TS_IN_USE);
	tape_release(device);
	/* Drop the reference taken in tapeblock_open(). */
	tape_put_device(device);

	return 0;
}
424 
425 /*
426  * Support of some generic block device IOCTLs.
427  */
428 static int
tapeblock_ioctl(struct block_device * bdev,fmode_t mode,unsigned int command,unsigned long arg)429 tapeblock_ioctl(
430 	struct block_device *	bdev,
431 	fmode_t			mode,
432 	unsigned int		command,
433 	unsigned long		arg
434 ) {
435 	int rc;
436 	int minor;
437 	struct gendisk *disk = bdev->bd_disk;
438 	struct tape_device *device;
439 
440 	rc     = 0;
441 	BUG_ON(!disk);
442 	device = disk->private_data;
443 	BUG_ON(!device);
444 	minor  = MINOR(bdev->bd_dev);
445 
446 	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
447 	DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
448 
449 	switch (command) {
450 		/* Refuse some IOCTL calls without complaining (mount). */
451 		case 0x5310:		/* CDROMMULTISESSION */
452 			rc = -EINVAL;
453 			break;
454 		default:
455 			PRINT_WARN("invalid ioctl 0x%x\n", command);
456 			rc = -EINVAL;
457 	}
458 
459 	return rc;
460 }
461 
462 /*
463  * Initialize block device frontend.
464  */
465 int
tapeblock_init(void)466 tapeblock_init(void)
467 {
468 	int rc;
469 
470 	/* Register the tape major number to the kernel */
471 	rc = register_blkdev(tapeblock_major, "tBLK");
472 	if (rc < 0)
473 		return rc;
474 
475 	if (tapeblock_major == 0)
476 		tapeblock_major = rc;
477 	PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
478 	return 0;
479 }
480 
/*
 * Deregister major for block device frontend.
 */
void
tapeblock_exit(void)
{
	/* Name must match the one passed to register_blkdev() in init. */
	unregister_blkdev(tapeblock_major, "tBLK");
}
489