1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * Copyright IBM Corp. 1999, 2009
9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11 */
12
13 #define KMSG_COMPONENT "dasd-eckd"
14
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h> /* HDIO_GETGEO */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/io.h>
30 #include <linux/uaccess.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif /* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44
45 /*
46 * raw track access always map to 64k in memory
47 * so it maps to 16 blocks of 4k per track
48 */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k are 128 x 512 byte sectors */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
53
54 MODULE_LICENSE("GPL");
55
56 static struct dasd_discipline dasd_eckd_discipline;
57
58 /* The ccw bus type uses this table to find devices that it sends to
59 * dasd_eckd_probe */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71 { /* end of list */ },
72 };
73
MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

/* page used to pad raw-track requests — allocated elsewhere in this file;
 * TODO confirm usage (no reference visible in this chunk) */
static void *rawpadpage;

/* result codes of the initial volume analysis request */
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
83
/* emergency request for reserve/release */
/* pre-built request (cqr + single ccw + payload); serialized through
 * dasd_reserve_mutex — allocation happens elsewhere, presumably at module
 * init; verify against the rest of the file */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

/* pre-built request with a two-ccw program, used for volume info;
 * serialized through dasd_vol_info_mutex */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
98
/* deferred-work context for extent pool exhaustion handling */
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];	/* RCD response area */
	int isglobal;		/* set when the static pe_handler_worker is used */
	__u8 tbvpm;		/* path masks handed to the worker */
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);

/* deferred-work context for attention message handling */
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;		/* last path used mask */
};

/* forward declarations for helpers defined later in this file */
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
130
131 /* initial attempt at a probe function. this can be simplified once
132 * the other detection code is gone */
133 static int
dasd_eckd_probe(struct ccw_device * cdev)134 dasd_eckd_probe (struct ccw_device *cdev)
135 {
136 int ret;
137
138 /* set ECKD specific ccw-device options */
139 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
140 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
141 if (ret) {
142 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
143 "dasd_eckd_probe: could not set "
144 "ccw-device options");
145 return ret;
146 }
147 ret = dasd_generic_probe(cdev);
148 return ret;
149 }
150
/* set_online callback: attach the device to the ECKD discipline */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
156
/* record sizes of the first three records of track 0 in cdl format
 * (see dasd_eckd_cdl_reclen) */
static const int sizes_trk0[] = { 28, 148, 84 };
/* size of the remaining special cdl records (label size) */
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
163
/*
 * Integer division rounded up: smallest integer >= d1 / d2.
 * Caller must ensure d2 != 0.
 */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	unsigned int q = d1 / d2;

	if (d1 % d2)
		q++;
	return q;
}
169
170 static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,unsigned int kl,unsigned int dl)171 recs_per_track(struct dasd_eckd_characteristics * rdc,
172 unsigned int kl, unsigned int dl)
173 {
174 int dn, kn;
175
176 switch (rdc->dev_type) {
177 case 0x3380:
178 if (kl)
179 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
180 ceil_quot(dl + 12, 32));
181 else
182 return 1499 / (15 + ceil_quot(dl + 12, 32));
183 case 0x3390:
184 dn = ceil_quot(dl + 6, 232) + 1;
185 if (kl) {
186 kn = ceil_quot(kl + 6, 232) + 1;
187 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
188 9 + ceil_quot(dl + 6 * dn, 34));
189 } else
190 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
191 case 0x9345:
192 dn = ceil_quot(dl + 6, 232) + 1;
193 if (kl) {
194 kn = ceil_quot(kl + 6, 232) + 1;
195 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
196 ceil_quot(dl + 6 * dn, 34));
197 } else
198 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
199 }
200 return 0;
201 }
202
set_ch_t(struct ch_t * geo,__u32 cyl,__u8 head)203 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
204 {
205 geo->cyl = (__u16) cyl;
206 geo->head = cyl >> 16;
207 geo->head <<= 4;
208 geo->head |= head;
209 }
210
211 /*
212 * calculate failing track from sense data depending if
213 * it is an EAV device or not
214 */
dasd_eckd_track_from_irb(struct irb * irb,struct dasd_device * device,sector_t * track)215 static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
216 sector_t *track)
217 {
218 struct dasd_eckd_private *private = device->private;
219 u8 *sense = NULL;
220 u32 cyl;
221 u8 head;
222
223 sense = dasd_get_sense(irb);
224 if (!sense) {
225 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
226 "ESE error no sense data\n");
227 return -EINVAL;
228 }
229 if (!(sense[27] & DASD_SENSE_BIT_2)) {
230 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
231 "ESE error no valid track data\n");
232 return -EINVAL;
233 }
234
235 if (sense[27] & DASD_SENSE_BIT_3) {
236 /* enhanced addressing */
237 cyl = sense[30] << 20;
238 cyl |= (sense[31] & 0xF0) << 12;
239 cyl |= sense[28] << 8;
240 cyl |= sense[29];
241 } else {
242 cyl = sense[29] << 8;
243 cyl |= sense[30];
244 }
245 head = sense[31] & 0x0F;
246 *track = cyl * private->rdc_data.trk_per_cyl + head;
247 return 0;
248 }
249
set_timestamp(struct ccw1 * ccw,struct DE_eckd_data * data,struct dasd_device * device)250 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
251 struct dasd_device *device)
252 {
253 struct dasd_eckd_private *private = device->private;
254 int rc;
255
256 rc = get_phys_clock(&data->ep_sys_time);
257 /*
258 * Ignore return code if XRC is not supported or
259 * sync clock is switched off
260 */
261 if ((rc && !private->rdc_data.facilities.XRC_supported) ||
262 rc == -EOPNOTSUPP || rc == -EACCES)
263 return 0;
264
265 /* switch on System Time Stamp - needed for XRC Support */
266 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
267 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
268
269 if (ccw) {
270 ccw->count = sizeof(struct DE_eckd_data);
271 ccw->flags |= CCW_FLAG_SLI;
272 }
273
274 return rc;
275 }
276
/*
 * Fill a Define Extent payload (and optionally its ccw) for the given
 * command and track range [trk, totrk].
 *
 * The permission/authorization mask and cache attributes are chosen per
 * command; write commands additionally get an XRC time stamp via
 * set_timestamp().  The track range is converted into beginning/ending
 * cylinder-head addresses; for sequential operations the ending cylinder
 * is extended to allow prestaging.
 *
 * Returns 0, or the result of set_timestamp() for commands that use it.
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	/* read commands: permit reading, keep configured cache operation */
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	/* write commands: permit writing, add time stamp for XRC */
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	/* formatting commands additionally need authorization */
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* Regular Data Format Mode for newer control units, except for
	 * the cdl records on tracks 0/1 */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	/* convert absolute track numbers to cylinder/head pairs */
	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
383

/*
 * Fill a Locate Record Extended (LRE) payload and, optionally, the
 * corresponding ccw.
 *
 * @trk:	track to be addressed
 * @rec_on_trk:	record number on that track (0 for track based I/O)
 * @count:	number of records for record based I/O, number of tracks
 *		for track based I/O (see the note at data->count below)
 * @cmd:	the ECKD command the LRE prepares for; selects orientation,
 *		operation code and length fields
 * @reclen:	record length for record based I/O
 * @tlf:	transfer length factor (used for READ_TRACK_DATA)
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* full-track write transfers two extra extended-parameter
		 * bytes */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/* estimate the rotational position of the record from the
		 * per-device-type geometry formulas — presumably derived
		 * from the hardware characteristics; verify against the
		 * device documentation */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/* extended_parameter is a bit mask with one bit per track
		 * of the request, left-aligned across the two bytes */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			    "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	/* seek and search arguments address the same cylinder/head/record */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
526
prefix_LRE(struct ccw1 * ccw,struct PFX_eckd_data * pfxdata,unsigned int trk,unsigned int totrk,int cmd,struct dasd_device * basedev,struct dasd_device * startdev,unsigned int format,unsigned int rec_on_trk,int count,unsigned int blksize,unsigned int tlf)527 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
528 unsigned int trk, unsigned int totrk, int cmd,
529 struct dasd_device *basedev, struct dasd_device *startdev,
530 unsigned int format, unsigned int rec_on_trk, int count,
531 unsigned int blksize, unsigned int tlf)
532 {
533 struct dasd_eckd_private *basepriv, *startpriv;
534 struct LRE_eckd_data *lredata;
535 struct DE_eckd_data *dedata;
536 int rc = 0;
537
538 basepriv = basedev->private;
539 startpriv = startdev->private;
540 dedata = &pfxdata->define_extent;
541 lredata = &pfxdata->locate_record;
542
543 ccw->cmd_code = DASD_ECKD_CCW_PFX;
544 ccw->flags = 0;
545 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
546 ccw->count = sizeof(*pfxdata) + 2;
547 ccw->cda = (__u32) __pa(pfxdata);
548 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
549 } else {
550 ccw->count = sizeof(*pfxdata);
551 ccw->cda = (__u32) __pa(pfxdata);
552 memset(pfxdata, 0, sizeof(*pfxdata));
553 }
554
555 /* prefix data */
556 if (format > 1) {
557 DBF_DEV_EVENT(DBF_ERR, basedev,
558 "PFX LRE unknown format 0x%x", format);
559 BUG();
560 return -EINVAL;
561 }
562 pfxdata->format = format;
563 pfxdata->base_address = basepriv->ned->unit_addr;
564 pfxdata->base_lss = basepriv->ned->ID;
565 pfxdata->validity.define_extent = 1;
566
567 /* private uid is kept up to date, conf_data may be outdated */
568 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
569 pfxdata->validity.verify_base = 1;
570
571 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
572 pfxdata->validity.verify_base = 1;
573 pfxdata->validity.hyper_pav = 1;
574 }
575
576 rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
577
578 /*
579 * For some commands the System Time Stamp is set in the define extent
580 * data when XRC is supported. The validity of the time stamp must be
581 * reflected in the prefix data as well.
582 */
583 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
584 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
585
586 if (format == 1) {
587 locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
588 basedev, blksize, tlf);
589 }
590
591 return rc;
592 }
593
prefix(struct ccw1 * ccw,struct PFX_eckd_data * pfxdata,unsigned int trk,unsigned int totrk,int cmd,struct dasd_device * basedev,struct dasd_device * startdev)594 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
595 unsigned int trk, unsigned int totrk, int cmd,
596 struct dasd_device *basedev, struct dasd_device *startdev)
597 {
598 return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
599 0, 0, 0, 0, 0);
600 }
601
/*
 * Fill a (non-extended) Locate Record ccw and its LO_eckd_data payload.
 *
 * Counterpart of locate_record_ext() for the classic 16-byte LO format:
 * same per-device sector estimation and the same per-command operation
 * and orientation encoding.  @no_rec is the number of records addressed;
 * for the record-zero commands one extra record is counted.
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		  trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/* estimate the rotational position of the record from the
		 * per-device-type geometry formulas — presumably derived
		 * from the hardware characteristics; verify against the
		 * device documentation */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	/* seek and search arguments address the same cylinder/head/record */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
703
704 /*
705 * Returns 1 if the block is one of the special blocks that needs
706 * to get read/written with the KD variant of the command.
707 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
708 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
709 * Luckily the KD variants differ only by one bit (0x08) from the
710 * normal variant. So don't wonder about code like:
711 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
712 * ccw->cmd_code |= 0x8;
713 */
714 static inline int
dasd_eckd_cdl_special(int blk_per_trk,int recid)715 dasd_eckd_cdl_special(int blk_per_trk, int recid)
716 {
717 if (recid < 3)
718 return 1;
719 if (recid < blk_per_trk)
720 return 0;
721 if (recid < 2 * blk_per_trk)
722 return 1;
723 return 0;
724 }
725
726 /*
727 * Returns the record size for the special blocks of the cdl format.
728 * Only returns something useful if dasd_eckd_cdl_special is true
729 * for the recid.
730 */
731 static inline int
dasd_eckd_cdl_reclen(int recid)732 dasd_eckd_cdl_reclen(int recid)
733 {
734 if (recid < 3)
735 return sizes_trk0[recid];
736 return LABEL_SIZE;
737 }
/* create unique id from private structure. */
/*
 * The uid is assembled from the configuration records cached in
 * private: vendor and serial come from the NED (converted from EBCDIC
 * to ASCII), ssid from the general NEQ, the unit address type from the
 * SNEQ (defaulting to UA_BASE_DEVICE when no SNEQ is present) and, for
 * z/VM minidisks, the virtual device token from the vdsneq is rendered
 * as a hex string into vduit.
 * Callers must hold the ccwdev lock (see dasd_eckd_generate_uid).
 */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &private->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
768
769 /*
770 * Generate device unique id that specifies the physical device.
771 */
dasd_eckd_generate_uid(struct dasd_device * device)772 static int dasd_eckd_generate_uid(struct dasd_device *device)
773 {
774 struct dasd_eckd_private *private = device->private;
775 unsigned long flags;
776
777 if (!private)
778 return -ENODEV;
779 if (!private->ned || !private->gneq)
780 return -ENODEV;
781 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
782 create_uid(private);
783 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
784 return 0;
785 }
786
dasd_eckd_get_uid(struct dasd_device * device,struct dasd_uid * uid)787 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
788 {
789 struct dasd_eckd_private *private = device->private;
790 unsigned long flags;
791
792 if (private) {
793 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
794 *uid = private->uid;
795 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
796 return 0;
797 }
798 return -EINVAL;
799 }
800
801 /*
802 * compare device UID with data of a given dasd_eckd_private structure
803 * return 0 for match
804 */
dasd_eckd_compare_path_uid(struct dasd_device * device,struct dasd_eckd_private * private)805 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
806 struct dasd_eckd_private *private)
807 {
808 struct dasd_uid device_uid;
809
810 create_uid(private);
811 dasd_eckd_get_uid(device, &device_uid);
812
813 return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
814 }
815
dasd_eckd_fill_rcd_cqr(struct dasd_device * device,struct dasd_ccw_req * cqr,__u8 * rcd_buffer,__u8 lpm)816 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
817 struct dasd_ccw_req *cqr,
818 __u8 *rcd_buffer,
819 __u8 lpm)
820 {
821 struct ccw1 *ccw;
822 /*
823 * buffer has to start with EBCDIC "V1.0" to show
824 * support for virtual device SNEQ
825 */
826 rcd_buffer[0] = 0xE5;
827 rcd_buffer[1] = 0xF1;
828 rcd_buffer[2] = 0x4B;
829 rcd_buffer[3] = 0xF0;
830
831 ccw = cqr->cpaddr;
832 ccw->cmd_code = DASD_ECKD_CCW_RCD;
833 ccw->flags = 0;
834 ccw->cda = (__u32)(addr_t)rcd_buffer;
835 ccw->count = DASD_ECKD_RCD_DATA_SIZE;
836 cqr->magic = DASD_ECKD_MAGIC;
837
838 cqr->startdev = device;
839 cqr->memdev = device;
840 cqr->block = NULL;
841 cqr->expires = 10*HZ;
842 cqr->lpm = lpm;
843 cqr->retries = 256;
844 cqr->buildclk = get_tod_clock();
845 cqr->status = DASD_CQR_FILLED;
846 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
847 }
848
849 /*
850 * Wakeup helper for read_conf
851 * if the cqr is not done and needs some error recovery
852 * the buffer has to be re-initialized with the EBCDIC "V1.0"
853 * to show support for virtual device SNEQ
854 */
read_conf_cb(struct dasd_ccw_req * cqr,void * data)855 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
856 {
857 struct ccw1 *ccw;
858 __u8 *rcd_buffer;
859
860 if (cqr->status != DASD_CQR_DONE) {
861 ccw = cqr->cpaddr;
862 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
863 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
864
865 rcd_buffer[0] = 0xE5;
866 rcd_buffer[1] = 0xF1;
867 rcd_buffer[2] = 0x4B;
868 rcd_buffer[3] = 0xF0;
869 }
870 dasd_wakeup_cb(cqr, data);
871 }
872
dasd_eckd_read_conf_immediately(struct dasd_device * device,struct dasd_ccw_req * cqr,__u8 * rcd_buffer,__u8 lpm)873 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
874 struct dasd_ccw_req *cqr,
875 __u8 *rcd_buffer,
876 __u8 lpm)
877 {
878 struct ciw *ciw;
879 int rc;
880 /*
881 * sanity check: scan for RCD command in extended SenseID data
882 * some devices do not support RCD
883 */
884 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
885 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
886 return -EOPNOTSUPP;
887
888 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
889 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
890 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
891 cqr->retries = 5;
892 cqr->callback = read_conf_cb;
893 rc = dasd_sleep_on_immediatly(cqr);
894 return rc;
895 }
896
dasd_eckd_read_conf_lpm(struct dasd_device * device,void ** rcd_buffer,int * rcd_buffer_size,__u8 lpm)897 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
898 void **rcd_buffer,
899 int *rcd_buffer_size, __u8 lpm)
900 {
901 struct ciw *ciw;
902 char *rcd_buf = NULL;
903 int ret;
904 struct dasd_ccw_req *cqr;
905
906 /*
907 * sanity check: scan for RCD command in extended SenseID data
908 * some devices do not support RCD
909 */
910 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
911 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
912 ret = -EOPNOTSUPP;
913 goto out_error;
914 }
915 rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
916 if (!rcd_buf) {
917 ret = -ENOMEM;
918 goto out_error;
919 }
920 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
921 0, /* use rcd_buf as data ara */
922 device, NULL);
923 if (IS_ERR(cqr)) {
924 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
925 "Could not allocate RCD request");
926 ret = -ENOMEM;
927 goto out_error;
928 }
929 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
930 cqr->callback = read_conf_cb;
931 ret = dasd_sleep_on(cqr);
932 /*
933 * on success we update the user input parms
934 */
935 dasd_sfree_request(cqr, cqr->memdev);
936 if (ret)
937 goto out_error;
938
939 *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
940 *rcd_buffer = rcd_buf;
941 return 0;
942 out_error:
943 kfree(rcd_buf);
944 *rcd_buffer = NULL;
945 *rcd_buffer_size = 0;
946 return ret;
947 }
948
dasd_eckd_identify_conf_parts(struct dasd_eckd_private * private)949 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
950 {
951
952 struct dasd_sneq *sneq;
953 int i, count;
954
955 private->ned = NULL;
956 private->sneq = NULL;
957 private->vdsneq = NULL;
958 private->gneq = NULL;
959 count = private->conf_len / sizeof(struct dasd_sneq);
960 sneq = (struct dasd_sneq *)private->conf_data;
961 for (i = 0; i < count; ++i) {
962 if (sneq->flags.identifier == 1 && sneq->format == 1)
963 private->sneq = sneq;
964 else if (sneq->flags.identifier == 1 && sneq->format == 4)
965 private->vdsneq = (struct vd_sneq *)sneq;
966 else if (sneq->flags.identifier == 2)
967 private->gneq = (struct dasd_gneq *)sneq;
968 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
969 private->ned = (struct dasd_ned *)sneq;
970 sneq++;
971 }
972 if (!private->ned || !private->gneq) {
973 private->ned = NULL;
974 private->sneq = NULL;
975 private->vdsneq = NULL;
976 private->gneq = NULL;
977 return -EINVAL;
978 }
979 return 0;
980
981 };
982
dasd_eckd_path_access(void * conf_data,int conf_len)983 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
984 {
985 struct dasd_gneq *gneq;
986 int i, count, found;
987
988 count = conf_len / sizeof(*gneq);
989 gneq = (struct dasd_gneq *)conf_data;
990 found = 0;
991 for (i = 0; i < count; ++i) {
992 if (gneq->flags.identifier == 2) {
993 found = 1;
994 break;
995 }
996 gneq++;
997 }
998 if (found)
999 return ((char *)gneq)[18] & 0x07;
1000 else
1001 return 0;
1002 }
1003
dasd_eckd_store_conf_data(struct dasd_device * device,struct dasd_conf_data * conf_data,int chp)1004 static void dasd_eckd_store_conf_data(struct dasd_device *device,
1005 struct dasd_conf_data *conf_data, int chp)
1006 {
1007 struct dasd_eckd_private *private = device->private;
1008 struct channel_path_desc_fmt0 *chp_desc;
1009 struct subchannel_id sch_id;
1010 void *cdp;
1011
1012 /*
1013 * path handling and read_conf allocate data
1014 * free it before replacing the pointer
1015 * also replace the old private->conf_data pointer
1016 * with the new one if this points to the same data
1017 */
1018 cdp = device->path[chp].conf_data;
1019 if (private->conf_data == cdp) {
1020 private->conf_data = (void *)conf_data;
1021 dasd_eckd_identify_conf_parts(private);
1022 }
1023 ccw_device_get_schid(device->cdev, &sch_id);
1024 device->path[chp].conf_data = conf_data;
1025 device->path[chp].cssid = sch_id.cssid;
1026 device->path[chp].ssid = sch_id.ssid;
1027 chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
1028 if (chp_desc)
1029 device->path[chp].chpid = chp_desc->chpid;
1030 kfree(chp_desc);
1031 kfree(cdp);
1032 }
1033
dasd_eckd_clear_conf_data(struct dasd_device * device)1034 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1035 {
1036 struct dasd_eckd_private *private = device->private;
1037 int i;
1038
1039 private->conf_data = NULL;
1040 private->conf_len = 0;
1041 for (i = 0; i < 8; i++) {
1042 kfree(device->path[i].conf_data);
1043 device->path[i].conf_data = NULL;
1044 device->path[i].cssid = 0;
1045 device->path[i].ssid = 0;
1046 device->path[i].chpid = 0;
1047 dasd_path_notoper(device, i);
1048 }
1049 }
1050
dasd_eckd_read_fc_security(struct dasd_device * device)1051 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1052 {
1053 struct dasd_eckd_private *private = device->private;
1054 u8 esm_valid;
1055 u8 esm[8];
1056 int chp;
1057 int rc;
1058
1059 rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1060 if (rc) {
1061 for (chp = 0; chp < 8; chp++)
1062 device->path[chp].fc_security = 0;
1063 return;
1064 }
1065
1066 for (chp = 0; chp < 8; chp++) {
1067 if (esm_valid & (0x80 >> chp))
1068 device->path[chp].fc_security = esm[chp];
1069 else
1070 device->path[chp].fc_security = 0;
1071 }
1072 }
1073
dasd_eckd_read_conf(struct dasd_device * device)1074 static int dasd_eckd_read_conf(struct dasd_device *device)
1075 {
1076 void *conf_data;
1077 int conf_len, conf_data_saved;
1078 int rc, path_err, pos;
1079 __u8 lpm, opm;
1080 struct dasd_eckd_private *private, path_private;
1081 struct dasd_uid *uid;
1082 char print_path_uid[60], print_device_uid[60];
1083
1084 private = device->private;
1085 opm = ccw_device_get_path_mask(device->cdev);
1086 conf_data_saved = 0;
1087 path_err = 0;
1088 /* get configuration data per operational path */
1089 for (lpm = 0x80; lpm; lpm>>= 1) {
1090 if (!(lpm & opm))
1091 continue;
1092 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1093 &conf_len, lpm);
1094 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
1095 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1096 "Read configuration data returned "
1097 "error %d", rc);
1098 return rc;
1099 }
1100 if (conf_data == NULL) {
1101 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1102 "No configuration data "
1103 "retrieved");
1104 /* no further analysis possible */
1105 dasd_path_add_opm(device, opm);
1106 continue; /* no error */
1107 }
1108 /* save first valid configuration data */
1109 if (!conf_data_saved) {
1110 /* initially clear previously stored conf_data */
1111 dasd_eckd_clear_conf_data(device);
1112 private->conf_data = conf_data;
1113 private->conf_len = conf_len;
1114 if (dasd_eckd_identify_conf_parts(private)) {
1115 private->conf_data = NULL;
1116 private->conf_len = 0;
1117 kfree(conf_data);
1118 continue;
1119 }
1120 /*
1121 * build device UID that other path data
1122 * can be compared to it
1123 */
1124 dasd_eckd_generate_uid(device);
1125 conf_data_saved++;
1126 } else {
1127 path_private.conf_data = conf_data;
1128 path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1129 if (dasd_eckd_identify_conf_parts(
1130 &path_private)) {
1131 path_private.conf_data = NULL;
1132 path_private.conf_len = 0;
1133 kfree(conf_data);
1134 continue;
1135 }
1136 if (dasd_eckd_compare_path_uid(
1137 device, &path_private)) {
1138 uid = &path_private.uid;
1139 if (strlen(uid->vduit) > 0)
1140 snprintf(print_path_uid,
1141 sizeof(print_path_uid),
1142 "%s.%s.%04x.%02x.%s",
1143 uid->vendor, uid->serial,
1144 uid->ssid, uid->real_unit_addr,
1145 uid->vduit);
1146 else
1147 snprintf(print_path_uid,
1148 sizeof(print_path_uid),
1149 "%s.%s.%04x.%02x",
1150 uid->vendor, uid->serial,
1151 uid->ssid,
1152 uid->real_unit_addr);
1153 uid = &private->uid;
1154 if (strlen(uid->vduit) > 0)
1155 snprintf(print_device_uid,
1156 sizeof(print_device_uid),
1157 "%s.%s.%04x.%02x.%s",
1158 uid->vendor, uid->serial,
1159 uid->ssid, uid->real_unit_addr,
1160 uid->vduit);
1161 else
1162 snprintf(print_device_uid,
1163 sizeof(print_device_uid),
1164 "%s.%s.%04x.%02x",
1165 uid->vendor, uid->serial,
1166 uid->ssid,
1167 uid->real_unit_addr);
1168 dev_err(&device->cdev->dev,
1169 "Not all channel paths lead to "
1170 "the same device, path %02X leads to "
1171 "device %s instead of %s\n", lpm,
1172 print_path_uid, print_device_uid);
1173 path_err = -EINVAL;
1174 dasd_path_add_cablepm(device, lpm);
1175 continue;
1176 }
1177 path_private.conf_data = NULL;
1178 path_private.conf_len = 0;
1179 }
1180
1181 pos = pathmask_to_pos(lpm);
1182 dasd_eckd_store_conf_data(device, conf_data, pos);
1183
1184 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1185 case 0x02:
1186 dasd_path_add_nppm(device, lpm);
1187 break;
1188 case 0x03:
1189 dasd_path_add_ppm(device, lpm);
1190 break;
1191 }
1192 if (!dasd_path_get_opm(device)) {
1193 dasd_path_set_opm(device, lpm);
1194 dasd_generic_path_operational(device);
1195 } else {
1196 dasd_path_add_opm(device, lpm);
1197 }
1198 }
1199
1200 dasd_eckd_read_fc_security(device);
1201
1202 return path_err;
1203 }
1204
get_fcx_max_data(struct dasd_device * device)1205 static u32 get_fcx_max_data(struct dasd_device *device)
1206 {
1207 struct dasd_eckd_private *private = device->private;
1208 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1209 unsigned int mdc;
1210 int tpm;
1211
1212 if (dasd_nofcx)
1213 return 0;
1214 /* is transport mode supported? */
1215 fcx_in_css = css_general_characteristics.fcx;
1216 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1217 fcx_in_features = private->features.feature[40] & 0x80;
1218 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1219
1220 if (!tpm)
1221 return 0;
1222
1223 mdc = ccw_device_get_mdc(device->cdev, 0);
1224 if (mdc == 0) {
1225 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1226 return 0;
1227 } else {
1228 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1229 }
1230 }
1231
/*
 * Verify that a new channel path supports at least the currently active
 * maximum data size for zHPF requests.
 *
 * Returns 0 when transport mode is not in use or the path qualifies, and
 * -EACCES when the path's maximum is below the active maximum.
 *
 * NOTE(review): when ccw_device_get_mdc() fails (mdc == 0) this returns
 * 0 as well, i.e. the path is accepted despite the failed query, and the
 * warning prints mdc (always 0) as "rc" -- confirm this is intended.
 */
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
1259
/*
 * Re-read configuration data over the operational paths and rebuild the
 * device UID from the first path that delivers valid data.
 *
 * Used when a path's UID no longer matches the device UID (e.g. after a
 * z/VM hyperswap has changed the device).  Returns 0 on success or a
 * negative errno if no path produced usable configuration data.
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		/*
		 * overwrite the stored conf_data in place; assumes the
		 * buffer holds at least DASD_ECKD_RCD_DATA_SIZE bytes
		 */
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
1298
/*
 * Verify the paths in @data->tbvpm that have become available again and,
 * if they check out, add them to the device's path masks.
 *
 * For each path the configuration data is read and classified; the path
 * UID is compared against the device UID so that a miscabled path (one
 * that leads to a different device) is never used.  Paths that do not
 * support the active zHPF maximum data size are excluded from normal
 * operation as well.
 */
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;	/* operational paths */
	npm = 0;	/* non-preferred paths */
	ppm = 0;	/* preferred paths */
	epm = 0;	/* paths to be verified again later */
	hpfpm = 0;	/* paths lacking zHPF support */
	cablepm = 0;	/* miscabled paths */

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			/* classify the path by its access mode */
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			/* exclude the path from normal operation */
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		}
		/*
		 * NOTE(review): on allocation failure a NULL conf_data is
		 * stored for this path -- confirm downstream users of
		 * device->path[pos].conf_data tolerate NULL.
		 */
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
1458
do_pe_handler_work(struct work_struct * work)1459 static void do_pe_handler_work(struct work_struct *work)
1460 {
1461 struct pe_handler_work_data *data;
1462 struct dasd_device *device;
1463
1464 data = container_of(work, struct pe_handler_work_data, worker);
1465 device = data->device;
1466
1467 /* delay path verification until device was resumed */
1468 if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1469 schedule_work(work);
1470 return;
1471 }
1472 /* check if path verification already running and delay if so */
1473 if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1474 schedule_work(work);
1475 return;
1476 }
1477
1478 if (data->tbvpm)
1479 dasd_eckd_path_available_action(device, data);
1480 if (data->fcsecpm)
1481 dasd_eckd_read_fc_security(device);
1482
1483 clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1484 dasd_put_device(device);
1485 if (data->isglobal)
1486 mutex_unlock(&dasd_pe_handler_mutex);
1487 else
1488 kfree(data);
1489 }
1490
/*
 * Schedule path-event handling (path verification and/or reading of the
 * FC security data) to be carried out by do_pe_handler_work().
 *
 * If the atomic allocation fails, fall back to the single statically
 * allocated work buffer, serialized by dasd_pe_handler_mutex.  Returns 0
 * on success or -ENOMEM if no work buffer could be obtained.
 */
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	/* kzalloc() replaces the former kmalloc()+memset() pair */
	data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}
1516
/*
 * Schedule channel paths for (re-)verification.  If @pm is zero, all
 * paths that are currently not operational are scheduled instead.
 */
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	/* lazily determine the transport-mode maximum data size */
	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
1529
dasd_eckd_read_features(struct dasd_device * device)1530 static int dasd_eckd_read_features(struct dasd_device *device)
1531 {
1532 struct dasd_eckd_private *private = device->private;
1533 struct dasd_psf_prssd_data *prssdp;
1534 struct dasd_rssd_features *features;
1535 struct dasd_ccw_req *cqr;
1536 struct ccw1 *ccw;
1537 int rc;
1538
1539 memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1540 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1541 (sizeof(struct dasd_psf_prssd_data) +
1542 sizeof(struct dasd_rssd_features)),
1543 device, NULL);
1544 if (IS_ERR(cqr)) {
1545 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1546 "allocate initialization request");
1547 return PTR_ERR(cqr);
1548 }
1549 cqr->startdev = device;
1550 cqr->memdev = device;
1551 cqr->block = NULL;
1552 cqr->retries = 256;
1553 cqr->expires = 10 * HZ;
1554
1555 /* Prepare for Read Subsystem Data */
1556 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1557 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1558 prssdp->order = PSF_ORDER_PRSSD;
1559 prssdp->suborder = 0x41; /* Read Feature Codes */
1560 /* all other bytes of prssdp must be zero */
1561
1562 ccw = cqr->cpaddr;
1563 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1564 ccw->count = sizeof(struct dasd_psf_prssd_data);
1565 ccw->flags |= CCW_FLAG_CC;
1566 ccw->cda = (__u32)(addr_t) prssdp;
1567
1568 /* Read Subsystem Data - feature codes */
1569 features = (struct dasd_rssd_features *) (prssdp + 1);
1570 memset(features, 0, sizeof(struct dasd_rssd_features));
1571
1572 ccw++;
1573 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1574 ccw->count = sizeof(struct dasd_rssd_features);
1575 ccw->cda = (__u32)(addr_t) features;
1576
1577 cqr->buildclk = get_tod_clock();
1578 cqr->status = DASD_CQR_FILLED;
1579 rc = dasd_sleep_on(cqr);
1580 if (rc == 0) {
1581 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1582 features = (struct dasd_rssd_features *) (prssdp + 1);
1583 memcpy(&private->features, features,
1584 sizeof(struct dasd_rssd_features));
1585 } else
1586 dev_warn(&device->cdev->dev, "Reading device feature codes"
1587 " failed with rc=%d\n", rc);
1588 dasd_sfree_request(cqr, cqr->memdev);
1589 return rc;
1590 }
1591
1592 /* Read Volume Information - Volume Storage Query */
/*
 * Issue a PSF/RSSD Volume Storage Query and cache the result in
 * private->vsq.  If the request structure cannot be allocated, a single
 * statically allocated request (dasd_vol_info_req) is used instead,
 * serialized by dasd_vol_info_mutex.
 *
 * Returns 0 on success (alias devices return 0 without doing anything)
 * or a negative errno.
 */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		/* fall back to the static request buffer */
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1672
dasd_eckd_is_ese(struct dasd_device * device)1673 static int dasd_eckd_is_ese(struct dasd_device *device)
1674 {
1675 struct dasd_eckd_private *private = device->private;
1676
1677 return private->vsq.vol_info.ese;
1678 }
1679
dasd_eckd_ext_pool_id(struct dasd_device * device)1680 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1681 {
1682 struct dasd_eckd_private *private = device->private;
1683
1684 return private->vsq.extent_pool_id;
1685 }
1686
1687 /*
1688 * This value represents the total amount of available space. As more space is
1689 * allocated by ESE volumes, this value will decrease.
1690 * The data for this value is therefore updated on any call.
1691 */
dasd_eckd_space_configured(struct dasd_device * device)1692 static int dasd_eckd_space_configured(struct dasd_device *device)
1693 {
1694 struct dasd_eckd_private *private = device->private;
1695 int rc;
1696
1697 rc = dasd_eckd_read_vol_info(device);
1698
1699 return rc ? : private->vsq.space_configured;
1700 }
1701
1702 /*
1703 * The value of space allocated by an ESE volume may have changed and is
1704 * therefore updated on any call.
1705 */
dasd_eckd_space_allocated(struct dasd_device * device)1706 static int dasd_eckd_space_allocated(struct dasd_device *device)
1707 {
1708 struct dasd_eckd_private *private = device->private;
1709 int rc;
1710
1711 rc = dasd_eckd_read_vol_info(device);
1712
1713 return rc ? : private->vsq.space_allocated;
1714 }
1715
dasd_eckd_logical_capacity(struct dasd_device * device)1716 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1717 {
1718 struct dasd_eckd_private *private = device->private;
1719
1720 return private->vsq.logical_capacity;
1721 }
1722
dasd_eckd_ext_pool_exhaust_work(struct work_struct * work)1723 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1724 {
1725 struct ext_pool_exhaust_work_data *data;
1726 struct dasd_device *device;
1727 struct dasd_device *base;
1728
1729 data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1730 device = data->device;
1731 base = data->base;
1732
1733 if (!base)
1734 base = device;
1735 if (dasd_eckd_space_configured(base) != 0) {
1736 dasd_generic_space_avail(device);
1737 } else {
1738 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1739 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1740 }
1741
1742 dasd_put_device(device);
1743 kfree(data);
1744 }
1745
dasd_eckd_ext_pool_exhaust(struct dasd_device * device,struct dasd_ccw_req * cqr)1746 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1747 struct dasd_ccw_req *cqr)
1748 {
1749 struct ext_pool_exhaust_work_data *data;
1750
1751 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1752 if (!data)
1753 return -ENOMEM;
1754 INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1755 dasd_get_device(device);
1756 data->device = device;
1757
1758 if (cqr->block)
1759 data->base = cqr->block->base;
1760 else if (cqr->basedev)
1761 data->base = cqr->basedev;
1762 else
1763 data->base = NULL;
1764
1765 schedule_work(&data->worker);
1766
1767 return 0;
1768 }
1769
dasd_eckd_cpy_ext_pool_data(struct dasd_device * device,struct dasd_rssd_lcq * lcq)1770 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1771 struct dasd_rssd_lcq *lcq)
1772 {
1773 struct dasd_eckd_private *private = device->private;
1774 int pool_id = dasd_eckd_ext_pool_id(device);
1775 struct dasd_ext_pool_sum eps;
1776 int i;
1777
1778 for (i = 0; i < lcq->pool_count; i++) {
1779 eps = lcq->ext_pool_sum[i];
1780 if (eps.pool_id == pool_id) {
1781 memcpy(&private->eps, &eps,
1782 sizeof(struct dasd_ext_pool_sum));
1783 }
1784 }
1785 }
1786
1787 /* Read Extent Pool Information - Logical Configuration Query */
/*
 * Issue a PSF/RSSD Logical Configuration Query and cache the extent pool
 * summary for this device's pool via dasd_eckd_cpy_ext_pool_data().
 *
 * Returns 0 on success (alias devices return 0 without doing anything)
 * or a negative errno.
 */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* second CCW reads the query result right behind the PSF data */
	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1853
1854 /*
1855 * Depending on the device type, the extent size is specified either as
1856 * cylinders per extent (CKD) or size per extent (FBA)
1857 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
1858 */
dasd_eckd_ext_size(struct dasd_device * device)1859 static int dasd_eckd_ext_size(struct dasd_device *device)
1860 {
1861 struct dasd_eckd_private *private = device->private;
1862 struct dasd_ext_pool_sum eps = private->eps;
1863
1864 if (!eps.flags.extent_size_valid)
1865 return 0;
1866 if (eps.extent_size.size_1G)
1867 return 1113;
1868 if (eps.extent_size.size_16M)
1869 return 21;
1870
1871 return 0;
1872 }
1873
dasd_eckd_ext_pool_warn_thrshld(struct dasd_device * device)1874 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1875 {
1876 struct dasd_eckd_private *private = device->private;
1877
1878 return private->eps.warn_thrshld;
1879 }
1880
dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device * device)1881 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1882 {
1883 struct dasd_eckd_private *private = device->private;
1884
1885 return private->eps.flags.capacity_at_warnlevel;
1886 }
1887
1888 /*
1889 * Extent Pool out of space
1890 */
dasd_eckd_ext_pool_oos(struct dasd_device * device)1891 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1892 {
1893 struct dasd_eckd_private *private = device->private;
1894
1895 return private->eps.flags.pool_oos;
1896 }
1897
1898 /*
1899 * Build CP for Perform Subsystem Function - SSC.
1900 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	/* NOTE(review): 0xc0 is the base SSC suborder used here;
	 * exact meaning per the PSF specification - confirm */
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		/* request PAV support from the storage server */
		psf_ssc_data->suborder |= 0x08;
		/* NOTE(review): 0x88 in reserved[0] accompanies the PAV
		 * request - meaning not derivable from this code */
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	/* 66 bytes are transferred for this PSF order */
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
1938
1939 /*
1940 * Perform Subsystem Function.
1941 * It is necessary to trigger CIO for channel revalidation since this
1942 * call might change behaviour of DASD devices.
1943 */
1944 static int
dasd_eckd_psf_ssc(struct dasd_device * device,int enable_pav,unsigned long flags)1945 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1946 unsigned long flags)
1947 {
1948 struct dasd_ccw_req *cqr;
1949 int rc;
1950
1951 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1952 if (IS_ERR(cqr))
1953 return PTR_ERR(cqr);
1954
1955 /*
1956 * set flags e.g. turn on failfast, to prevent blocking
1957 * the calling function should handle failed requests
1958 */
1959 cqr->flags |= flags;
1960
1961 rc = dasd_sleep_on(cqr);
1962 if (!rc)
1963 /* trigger CIO to reprobe devices */
1964 css_schedule_reprobe();
1965 else if (cqr->intrc == -EAGAIN)
1966 rc = -EAGAIN;
1967
1968 dasd_sfree_request(cqr, cqr->memdev);
1969 return rc;
1970 }
1971
/*
 * Validate storage server of current device.
 */
dasd_eckd_validate_server(struct dasd_device * device,unsigned long flags)1975 static int dasd_eckd_validate_server(struct dasd_device *device,
1976 unsigned long flags)
1977 {
1978 struct dasd_eckd_private *private = device->private;
1979 int enable_pav, rc;
1980
1981 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1982 private->uid.type == UA_HYPER_PAV_ALIAS)
1983 return 0;
1984 if (dasd_nopav || MACHINE_IS_VM)
1985 enable_pav = 0;
1986 else
1987 enable_pav = 1;
1988 rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1989
1990 /* may be requested feature is not available on server,
1991 * therefore just report error and go ahead */
1992 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1993 "returned rc=%d", private->uid.ssid, rc);
1994 return rc;
1995 }
1996
1997 /*
1998 * worker to do a validate server in case of a lost pathgroup
1999 */
dasd_eckd_do_validate_server(struct work_struct * work)2000 static void dasd_eckd_do_validate_server(struct work_struct *work)
2001 {
2002 struct dasd_device *device = container_of(work, struct dasd_device,
2003 kick_validate);
2004 unsigned long flags = 0;
2005
2006 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
2007 if (dasd_eckd_validate_server(device, flags)
2008 == -EAGAIN) {
2009 /* schedule worker again if failed */
2010 schedule_work(&device->kick_validate);
2011 return;
2012 }
2013
2014 dasd_put_device(device);
2015 }
2016
dasd_eckd_kick_validate_server(struct dasd_device * device)2017 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
2018 {
2019 dasd_get_device(device);
2020 /* exit if device not online or in offline processing */
2021 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
2022 device->state < DASD_STATE_ONLINE) {
2023 dasd_put_device(device);
2024 return;
2025 }
2026 /* queue call to do_validate_server to the kernel event daemon. */
2027 if (!schedule_work(&device->kick_validate))
2028 dasd_put_device(device);
2029 }
2030
2031 /*
2032 * Check device characteristics.
2033 * If the device is accessible using ECKD discipline, the device is enabled.
2034 */
2035 static int
dasd_eckd_check_characteristics(struct dasd_device * device)2036 dasd_eckd_check_characteristics(struct dasd_device *device)
2037 {
2038 struct dasd_eckd_private *private = device->private;
2039 struct dasd_block *block;
2040 struct dasd_uid temp_uid;
2041 int rc, i;
2042 int readonly;
2043 unsigned long value;
2044 
2045 /* setup work queue for validate server*/
2046 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2047 /* setup work queue for summary unit check */
2048 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2049 
/* a working path group is mandatory for ECKD operation */
2050 if (!ccw_device_is_pathgroup(device->cdev)) {
2051 dev_warn(&device->cdev->dev,
2052 "A channel path group could not be established\n");
2053 return -EIO;
2054 }
/* single-path operation is allowed, but worth a hint in the log */
2055 if (!ccw_device_is_multipath(device->cdev)) {
2056 dev_info(&device->cdev->dev,
2057 "The DASD is not operating in multipath mode\n");
2058 }
/* allocate private data on first use, otherwise reset it */
2059 if (!private) {
2060 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2061 if (!private) {
2062 dev_warn(&device->cdev->dev,
2063 "Allocating memory for private DASD data "
2064 "failed\n");
2065 return -ENOMEM;
2066 }
2067 device->private = private;
2068 } else {
2069 memset(private, 0, sizeof(*private));
2070 }
2071 /* Invalidate status of initial analysis. */
2072 private->init_cqr_status = -1;
2073 /* Set default cache operations. */
2074 private->attrib.operation = DASD_NORMAL_CACHE;
2075 private->attrib.nr_cyl = 0;
2076 
2077 /* Read Configuration Data */
2078 rc = dasd_eckd_read_conf(device);
2079 if (rc)
2080 goto out_err1;
2081 
2082 /* set some default values */
2083 device->default_expires = DASD_EXPIRES;
2084 device->default_retries = DASD_RETRIES;
2085 device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2086 device->path_interval = DASD_ECKD_PATH_INTERVAL;
2087 
/* GNEQ encodes a timeout as number * 10^value; use it as default expiry */
2088 if (private->gneq) {
2089 value = 1;
2090 for (i = 0; i < private->gneq->timeout.value; i++)
2091 value = 10 * value;
2092 value = value * private->gneq->timeout.number;
2093 /* do not accept useless values */
2094 if (value != 0 && value <= DASD_EXPIRES_MAX)
2095 device->default_expires = value;
2096 }
2097 
/* only base devices get a block structure; aliases are I/O paths only */
2098 dasd_eckd_get_uid(device, &temp_uid);
2099 if (temp_uid.type == UA_BASE_DEVICE) {
2100 block = dasd_alloc_block();
2101 if (IS_ERR(block)) {
2102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2103 "could not allocate dasd "
2104 "block structure");
2105 rc = PTR_ERR(block);
2106 goto out_err1;
2107 }
2108 device->block = block;
2109 block->base = device;
2110 }
2111 
2112 /* register lcu with alias handling, enable PAV */
2113 rc = dasd_alias_make_device_known_to_lcu(device);
2114 if (rc)
2115 goto out_err2;
2116 
/* NOTE(review): rc deliberately not checked - validation is best effort
 * and the callee already logs the result; confirm this is intended */
2117 dasd_eckd_validate_server(device, 0);
2118 
2119 /* device may report different configuration data after LCU setup */
2120 rc = dasd_eckd_read_conf(device);
2121 if (rc)
2122 goto out_err3;
2123 
2124 dasd_path_create_kobjects(device);
2125 
2126 /* Read Feature Codes */
2127 dasd_eckd_read_features(device);
2128 
2129 /* Read Volume Information */
2130 dasd_eckd_read_vol_info(device);
2131 
2132 /* Read Extent Pool Information */
2133 dasd_eckd_read_ext_pool_info(device);
2134 
2135 /* Read Device Characteristics */
2136 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2137 &private->rdc_data, 64);
2138 if (rc) {
2139 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2140 "Read device characteristic failed, rc=%d", rc);
2141 goto out_err3;
2142 }
2143 
/* raw-track access requires the Read Track in Locate Record facility */
2144 if ((device->features & DASD_FEATURE_USERAW) &&
2145 !(private->rdc_data.facilities.RT_in_LR)) {
2146 dev_err(&device->cdev->dev, "The storage server does not "
2147 "support raw-track access\n");
2148 rc = -EINVAL;
2149 goto out_err3;
2150 }
2151 
2152 /* find the valid cylinder size */
/* no_cyl == LV_COMPAT_CYL means the real count is in long_no_cyl */
2153 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2154 private->rdc_data.long_no_cyl)
2155 private->real_cyl = private->rdc_data.long_no_cyl;
2156 else
2157 private->real_cyl = private->rdc_data.no_cyl;
2158 
2159 private->fcx_max_data = get_fcx_max_data(device);
2160 
2161 readonly = dasd_device_is_ro(device);
2162 if (readonly)
2163 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2164 
2165 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2166 "with %d cylinders, %d heads, %d sectors%s\n",
2167 private->rdc_data.dev_type,
2168 private->rdc_data.dev_model,
2169 private->rdc_data.cu_type,
2170 private->rdc_data.cu_model.model,
2171 private->real_cyl,
2172 private->rdc_data.trk_per_cyl,
2173 private->rdc_data.sec_per_trk,
2174 readonly ? ", read-only device" : "");
2175 return 0;
2176 
/* error exits: undo the setup steps in reverse order */
2177 out_err3:
2178 dasd_alias_disconnect_device_from_lcu(device);
2179 out_err2:
/* presumably dasd_free_block() tolerates a NULL block for alias
 * devices that never allocated one - TODO confirm */
2180 dasd_free_block(device->block);
2181 device->block = NULL;
2182 out_err1:
2183 dasd_eckd_clear_conf_data(device);
2184 dasd_path_remove_kobjects(device);
2185 kfree(device->private);
2186 device->private = NULL;
2187 return rc;
2188 }
2189
/*
 * Undo dasd_eckd_check_characteristics(): disconnect the device from
 * its LCU and drop all cached configuration data. Safe to call when
 * private data was never allocated.
 */
dasd_eckd_uncheck_device(struct dasd_device * device)2190 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2191 {
2192 struct dasd_eckd_private *private = device->private;
2193 
2194 if (!private)
2195 return;
2196 
2197 dasd_alias_disconnect_device_from_lcu(device);
/* invalidate cached pointers into the configuration data records */
2198 private->ned = NULL;
2199 private->sneq = NULL;
2200 private->vdsneq = NULL;
2201 private->gneq = NULL;
2202 dasd_eckd_clear_conf_data(device);
2203 dasd_path_remove_kobjects(device);
2204 }
2205
/*
 * Build the initial analysis request: read the count fields of the
 * first 4 records on track 0 and of the first record on track 1 into
 * private->count_area (5 entries). The result is later used to detect
 * the disk layout (CDL vs LDL) and the block size.
 */
2206 static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device * device)2207 dasd_eckd_analysis_ccw(struct dasd_device *device)
2208 {
2209 struct dasd_eckd_private *private = device->private;
2210 struct eckd_count *count_data;
2211 struct LO_eckd_data *LO_data;
2212 struct dasd_ccw_req *cqr;
2213 struct ccw1 *ccw;
2214 int cplength, datasize;
2215 int i;
2216 
/* 8 CCWs: DE + LO + 4 read-count + LO + 1 read-count */
2217 cplength = 8;
2218 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2219 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2220 NULL);
2221 if (IS_ERR(cqr))
2222 return cqr;
2223 ccw = cqr->cpaddr;
2224 /* Define extent for the first 2 tracks. */
2225 define_extent(ccw++, cqr->data, 0, 1,
2226 DASD_ECKD_CCW_READ_COUNT, device, 0);
2227 LO_data = cqr->data + sizeof(struct DE_eckd_data);
2228 /* Locate record for the first 4 records on track 0. */
/* command chain each CCW to the next via the CC flag */
2229 ccw[-1].flags |= CCW_FLAG_CC;
2230 locate_record(ccw++, LO_data++, 0, 0, 4,
2231 DASD_ECKD_CCW_READ_COUNT, device, 0);
2232 
2233 count_data = private->count_area;
2234 for (i = 0; i < 4; i++) {
2235 ccw[-1].flags |= CCW_FLAG_CC;
2236 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2237 ccw->flags = 0;
2238 ccw->count = 8;
2239 ccw->cda = (__u32)(addr_t) count_data;
2240 ccw++;
2241 count_data++;
2242 }
2243 
2244 /* Locate record for the first record on track 1. */
2245 ccw[-1].flags |= CCW_FLAG_CC;
2246 locate_record(ccw++, LO_data++, 1, 0, 1,
2247 DASD_ECKD_CCW_READ_COUNT, device, 0);
2248 /* Read count ccw. */
2249 ccw[-1].flags |= CCW_FLAG_CC;
2250 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2251 ccw->flags = 0;
2252 ccw->count = 8;
2253 ccw->cda = (__u32)(addr_t) count_data;
2254 
2255 cqr->block = NULL;
2256 cqr->startdev = device;
2257 cqr->memdev = device;
2258 cqr->retries = 255;
2259 cqr->buildclk = get_tod_clock();
2260 cqr->status = DASD_CQR_FILLED;
2261 /* Set flags to suppress output for expected errors */
2262 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2263 
2264 return cqr;
2265 }
2266
2267 /* differentiate between 'no record found' and any other error */
dasd_eckd_analysis_evaluation(struct dasd_ccw_req * init_cqr)2268 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2269 {
2270 char *sense;
2271 if (init_cqr->status == DASD_CQR_DONE)
2272 return INIT_CQR_OK;
2273 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2274 init_cqr->status == DASD_CQR_FAILED) {
2275 sense = dasd_get_sense(&init_cqr->irb);
2276 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2277 return INIT_CQR_UNFORMATTED;
2278 else
2279 return INIT_CQR_ERROR;
2280 } else
2281 return INIT_CQR_ERROR;
2282 }
2283
2284 /*
2285 * This is the callback function for the init_analysis cqr. It saves
2286 * the status of the initial analysis ccw before it frees it and kicks
2287 * the device to continue the startup sequence. This will call
2288 * dasd_eckd_do_analysis again (if the device has not been marked
2289 * for deletion in the meantime).
2290 */
dasd_eckd_analysis_callback(struct dasd_ccw_req * init_cqr,void * data)2291 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2292 void *data)
2293 {
2294 struct dasd_device *device = init_cqr->startdev;
2295 struct dasd_eckd_private *private = device->private;
2296
2297 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2298 dasd_sfree_request(init_cqr, device);
2299 dasd_kick_device(device);
2300 }
2301
dasd_eckd_start_analysis(struct dasd_block * block)2302 static int dasd_eckd_start_analysis(struct dasd_block *block)
2303 {
2304 struct dasd_ccw_req *init_cqr;
2305
2306 init_cqr = dasd_eckd_analysis_ccw(block->base);
2307 if (IS_ERR(init_cqr))
2308 return PTR_ERR(init_cqr);
2309 init_cqr->callback = dasd_eckd_analysis_callback;
2310 init_cqr->callback_data = NULL;
2311 init_cqr->expires = 5*HZ;
2312 /* first try without ERP, so we can later handle unformatted
2313 * devices as special case
2314 */
2315 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2316 init_cqr->retries = 0;
2317 dasd_add_request_head(init_cqr);
2318 return -EAGAIN;
2319 }
2320
/*
 * Evaluate the completed analysis request: decide between compatible
 * disk layout (CDL) and linux disk layout (LDL), derive the block size
 * and the total number of blocks for the device.
 */
dasd_eckd_end_analysis(struct dasd_block * block)2321 static int dasd_eckd_end_analysis(struct dasd_block *block)
2322 {
2323 struct dasd_device *device = block->base;
2324 struct dasd_eckd_private *private = device->private;
2325 struct eckd_count *count_area;
2326 unsigned int sb, blk_per_trk;
2327 int status, i;
2328 struct dasd_ccw_req *init_cqr;
2329 
2330 status = private->init_cqr_status;
2331 private->init_cqr_status = -1;
2332 if (status == INIT_CQR_ERROR) {
2333 /* try again, this time with full ERP */
2334 init_cqr = dasd_eckd_analysis_ccw(device);
2335 dasd_sleep_on(init_cqr);
2336 status = dasd_eckd_analysis_evaluation(init_cqr);
2337 dasd_sfree_request(init_cqr, device);
2338 }
2339 
/* raw-track access uses a fixed geometry, skip layout detection */
2340 if (device->features & DASD_FEATURE_USERAW) {
2341 block->bp_block = DASD_RAW_BLOCKSIZE;
2342 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2343 block->s2b_shift = 3;
2344 goto raw;
2345 }
2346 
2347 if (status == INIT_CQR_UNFORMATTED) {
2348 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2349 return -EMEDIUMTYPE;
2350 } else if (status == INIT_CQR_ERROR) {
2351 dev_err(&device->cdev->dev,
2352 "Detecting the DASD disk layout failed because "
2353 "of an I/O error\n");
2354 return -EIO;
2355 }
2356 
2357 private->uses_cdl = 1;
2358 /* Check Track 0 for Compatible Disk Layout */
2359 count_area = NULL;
2360 for (i = 0; i < 3; i++) {
2361 if (private->count_area[i].kl != 4 ||
2362 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2363 private->count_area[i].cyl != 0 ||
2364 private->count_area[i].head != count_area_head[i] ||
2365 private->count_area[i].record != count_area_rec[i]) {
2366 private->uses_cdl = 0;
2367 break;
2368 }
2369 }
/* all three CDL records matched: record 4 describes regular blocks */
2370 if (i == 3)
2371 count_area = &private->count_area[3];
2372 
/* not CDL: check for a uniform linux disk layout instead */
2373 if (private->uses_cdl == 0) {
2374 for (i = 0; i < 5; i++) {
2375 if ((private->count_area[i].kl != 0) ||
2376 (private->count_area[i].dl !=
2377 private->count_area[0].dl) ||
2378 private->count_area[i].cyl != 0 ||
2379 private->count_area[i].head != count_area_head[i] ||
2380 private->count_area[i].record != count_area_rec[i])
2381 break;
2382 }
2383 if (i == 5)
2384 count_area = &private->count_area[0];
2385 } else {
2386 if (private->count_area[3].record == 1)
2387 dev_warn(&device->cdev->dev,
2388 "Track 0 has no records following the VTOC\n");
2389 }
2390 
2391 if (count_area != NULL && count_area->kl == 0) {
2392 /* we found nothing violating our disk layout */
2393 if (dasd_check_blocksize(count_area->dl) == 0)
2394 block->bp_block = count_area->dl;
2395 }
2396 if (block->bp_block == 0) {
2397 dev_warn(&device->cdev->dev,
2398 "The disk layout of the DASD is not supported\n");
2399 return -EMEDIUMTYPE;
2400 }
2401 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2402 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2403 block->s2b_shift++;
2404 
2405 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2406 
2407 raw:
2408 block->blocks = ((unsigned long) private->real_cyl *
2409 private->rdc_data.trk_per_cyl *
2410 blk_per_trk);
2411 
2412 dev_info(&device->cdev->dev,
2413 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2414 "%s\n", (block->bp_block >> 10),
2415 (((unsigned long) private->real_cyl *
2416 private->rdc_data.trk_per_cyl *
2417 blk_per_trk * (block->bp_block >> 9)) >> 1),
2418 ((blk_per_trk * block->bp_block) >> 10),
2419 private->uses_cdl ?
2420 "compatible disk layout" : "linux disk layout");
2421 
2422 return 0;
2423 }
2424
dasd_eckd_do_analysis(struct dasd_block * block)2425 static int dasd_eckd_do_analysis(struct dasd_block *block)
2426 {
2427 struct dasd_eckd_private *private = block->base->private;
2428
2429 if (private->init_cqr_status < 0)
2430 return dasd_eckd_start_analysis(block);
2431 else
2432 return dasd_eckd_end_analysis(block);
2433 }
2434
/* BASIC -> READY transition: register the device with alias management. */
dasd_eckd_basic_to_ready(struct dasd_device * device)2435 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2436 {
2437 return dasd_alias_add_device(device);
2438 };
2439
/*
 * ONLINE -> READY transition: cancel any pending reload and
 * validate-server work. Each work item that was still pending holds a
 * device reference which must be dropped here.
 */
dasd_eckd_online_to_ready(struct dasd_device * device)2440 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2441 {
2442 if (cancel_work_sync(&device->reload_device))
2443 dasd_put_device(device);
2444 if (cancel_work_sync(&device->kick_validate))
2445 dasd_put_device(device);
2446 
2447 return 0;
2448 };
2449
/* BASIC -> KNOWN transition: remove the device from alias management. */
dasd_eckd_basic_to_known(struct dasd_device * device)2450 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2451 {
2452 return dasd_alias_remove_device(device);
2453 };
2454
2455 static int
dasd_eckd_fill_geometry(struct dasd_block * block,struct hd_geometry * geo)2456 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2457 {
2458 struct dasd_eckd_private *private = block->base->private;
2459
2460 if (dasd_check_blocksize(block->bp_block) == 0) {
2461 geo->sectors = recs_per_track(&private->rdc_data,
2462 0, block->bp_block);
2463 }
2464 geo->cylinders = private->rdc_data.no_cyl;
2465 geo->heads = private->rdc_data.trk_per_cyl;
2466 return 0;
2467 }
2468
2469 /*
2470 * Build the TCW request for the format check
2471 */
2472 static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device * base,struct format_data_t * fdata,int enable_pav,struct eckd_count * fmt_buffer,int rpt)2473 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2474 int enable_pav, struct eckd_count *fmt_buffer,
2475 int rpt)
2476 {
2477 struct dasd_eckd_private *start_priv;
2478 struct dasd_device *startdev = NULL;
2479 struct tidaw *last_tidaw = NULL;
2480 struct dasd_ccw_req *cqr;
2481 struct itcw *itcw;
2482 int itcw_size;
2483 int count;
2484 int rc;
2485 int i;
2486 
/* with PAV an alias device may carry the I/O for the base device */
2487 if (enable_pav)
2488 startdev = dasd_alias_get_start_dev(base);
2489 
2490 if (!startdev)
2491 startdev = base;
2492 
2493 start_priv = startdev->private;
2494 
/* one count record per record per track in the checked range */
2495 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2496 
2497 /*
2498 * we're adding 'count' amount of tidaw to the itcw.
2499 * calculate the corresponding itcw_size
2500 */
2501 itcw_size = itcw_calc_size(0, count, 0);
2502 
2503 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2504 if (IS_ERR(cqr))
2505 return cqr;
2506 
/* track the number of format requests in flight on the start device */
2507 start_priv->count++;
2508 
2509 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2510 if (IS_ERR(itcw)) {
2511 rc = -EINVAL;
2512 goto out_err;
2513 }
2514 
2515 cqr->cpaddr = itcw_get_tcw(itcw);
2516 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2517 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2518 sizeof(struct eckd_count),
2519 count * sizeof(struct eckd_count), 0, rpt);
2520 if (rc)
2521 goto out_err;
2522 
/* one tidaw per count record, all reading into fmt_buffer */
2523 for (i = 0; i < count; i++) {
2524 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2525 sizeof(struct eckd_count));
2526 if (IS_ERR(last_tidaw)) {
2527 rc = -EINVAL;
2528 goto out_err;
2529 }
2530 }
2531 
2532 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2533 itcw_finalize(itcw);
2534 
2535 cqr->cpmode = 1;
2536 cqr->startdev = startdev;
2537 cqr->memdev = startdev;
2538 cqr->basedev = base;
2539 cqr->retries = startdev->default_retries;
2540 cqr->expires = startdev->default_expires * HZ;
2541 cqr->buildclk = get_tod_clock();
2542 cqr->status = DASD_CQR_FILLED;
2543 /* Set flags to suppress output for expected errors */
2544 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2545 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2546 
2547 return cqr;
2548 
2549 out_err:
2550 dasd_sfree_request(cqr, startdev);
2551 
2552 return ERR_PTR(rc);
2553 }
2554
2555 /*
2556 * Build the CCW request for the format check
2557 */
2558 static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device * base,struct format_data_t * fdata,int enable_pav,struct eckd_count * fmt_buffer,int rpt)2559 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2560 int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2561 {
2562 struct dasd_eckd_private *start_priv;
2563 struct dasd_eckd_private *base_priv;
2564 struct dasd_device *startdev = NULL;
2565 struct dasd_ccw_req *cqr;
2566 struct ccw1 *ccw;
2567 void *data;
2568 int cplength, datasize;
2569 int use_prefix;
2570 int count;
2571 int i;
2572 
/* with PAV an alias device may carry the I/O for the base device */
2573 if (enable_pav)
2574 startdev = dasd_alias_get_start_dev(base);
2575 
2576 if (!startdev)
2577 startdev = base;
2578 
2579 start_priv = startdev->private;
2580 base_priv = base->private;
2581 
/* one count record per record per track in the checked range */
2582 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2583 
/* feature byte 8 bit 0: storage server supports the Prefix command */
2584 use_prefix = base_priv->features.feature[8] & 0x01;
2585 
2586 if (use_prefix) {
2587 cplength = 1;
2588 datasize = sizeof(struct PFX_eckd_data);
2589 } else {
2590 cplength = 2;
2591 datasize = sizeof(struct DE_eckd_data) +
2592 sizeof(struct LO_eckd_data);
2593 }
2594 cplength += count;
2595 
2596 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2597 if (IS_ERR(cqr))
2598 return cqr;
2599 
/* track the number of format requests in flight on the start device */
2600 start_priv->count++;
2601 data = cqr->data;
2602 ccw = cqr->cpaddr;
2603 
2604 if (use_prefix) {
2605 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2606 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2607 count, 0, 0);
2608 } else {
2609 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2610 DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2611 
2612 data += sizeof(struct DE_eckd_data);
2613 ccw[-1].flags |= CCW_FLAG_CC;
2614 
2615 locate_record(ccw++, data, fdata->start_unit, 0, count,
2616 DASD_ECKD_CCW_READ_COUNT, base, 0);
2617 }
2618 
/* chain one read-count CCW per expected record */
2619 for (i = 0; i < count; i++) {
2620 ccw[-1].flags |= CCW_FLAG_CC;
2621 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2622 ccw->flags = CCW_FLAG_SLI;
2623 ccw->count = 8;
2624 ccw->cda = (__u32)(addr_t) fmt_buffer;
2625 ccw++;
2626 fmt_buffer++;
2627 }
2628 
2629 cqr->startdev = startdev;
2630 cqr->memdev = startdev;
2631 cqr->basedev = base;
2632 cqr->retries = DASD_RETRIES;
2633 cqr->expires = startdev->default_expires * HZ;
2634 cqr->buildclk = get_tod_clock();
2635 cqr->status = DASD_CQR_FILLED;
2636 /* Set flags to suppress output for expected errors */
2637 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2638 
2639 return cqr;
2640 }
2641
/*
 * Build the CCW chain that formats tracks fdata->start_unit through
 * fdata->stop_unit according to fdata->intensity (see the bit table
 * below). Returns the filled request or an ERR_PTR.
 */
2642 static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device * base,struct dasd_device * startdev,struct format_data_t * fdata,int enable_pav)2643 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2644 struct format_data_t *fdata, int enable_pav)
2645 {
2646 struct dasd_eckd_private *base_priv;
2647 struct dasd_eckd_private *start_priv;
2648 struct dasd_ccw_req *fcp;
2649 struct eckd_count *ect;
2650 struct ch_t address;
2651 struct ccw1 *ccw;
2652 void *data;
2653 int rpt;
2654 int cplength, datasize;
2655 int i, j;
2656 int intensity = 0;
2657 int r0_perm;
2658 int nr_tracks;
2659 int use_prefix;
2660 
/* with PAV an alias device may carry the I/O for the base device */
2661 if (enable_pav)
2662 startdev = dasd_alias_get_start_dev(base);
2663 
2664 if (!startdev)
2665 startdev = base;
2666 
2667 start_priv = startdev->private;
2668 base_priv = base->private;
2669 
2670 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2671 
2672 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2673 
2674 /*
2675 * fdata->intensity is a bit string that tells us what to do:
2676 * Bit 0: write record zero
2677 * Bit 1: write home address, currently not supported
2678 * Bit 2: invalidate tracks
2679 * Bit 3: use OS/390 compatible disk layout (cdl)
2680 * Bit 4: do not allow storage subsystem to modify record zero
2681 * Only some bit combinations do make sense.
2682 */
2683 if (fdata->intensity & 0x10) {
2684 r0_perm = 0;
2685 intensity = fdata->intensity & ~0x10;
2686 } else {
2687 r0_perm = 1;
2688 intensity = fdata->intensity;
2689 }
2690 
/* feature byte 8 bit 0: storage server supports the Prefix command */
2691 use_prefix = base_priv->features.feature[8] & 0x01;
2692 
/* compute channel program length and data area size per intensity */
2693 switch (intensity) {
2694 case 0x00: /* Normal format */
2695 case 0x08: /* Normal format, use cdl. */
2696 cplength = 2 + (rpt*nr_tracks);
2697 if (use_prefix)
2698 datasize = sizeof(struct PFX_eckd_data) +
2699 sizeof(struct LO_eckd_data) +
2700 rpt * nr_tracks * sizeof(struct eckd_count);
2701 else
2702 datasize = sizeof(struct DE_eckd_data) +
2703 sizeof(struct LO_eckd_data) +
2704 rpt * nr_tracks * sizeof(struct eckd_count);
2705 break;
2706 case 0x01: /* Write record zero and format track. */
2707 case 0x09: /* Write record zero and format track, use cdl. */
2708 cplength = 2 + rpt * nr_tracks;
2709 if (use_prefix)
2710 datasize = sizeof(struct PFX_eckd_data) +
2711 sizeof(struct LO_eckd_data) +
2712 sizeof(struct eckd_count) +
2713 rpt * nr_tracks * sizeof(struct eckd_count);
2714 else
2715 datasize = sizeof(struct DE_eckd_data) +
2716 sizeof(struct LO_eckd_data) +
2717 sizeof(struct eckd_count) +
2718 rpt * nr_tracks * sizeof(struct eckd_count);
2719 break;
2720 case 0x04: /* Invalidate track. */
2721 case 0x0c: /* Invalidate track, use cdl. */
2722 cplength = 3;
2723 if (use_prefix)
2724 datasize = sizeof(struct PFX_eckd_data) +
2725 sizeof(struct LO_eckd_data) +
2726 sizeof(struct eckd_count);
2727 else
2728 datasize = sizeof(struct DE_eckd_data) +
2729 sizeof(struct LO_eckd_data) +
2730 sizeof(struct eckd_count);
2731 break;
2732 default:
2733 dev_warn(&startdev->cdev->dev,
2734 "An I/O control call used incorrect flags 0x%x\n",
2735 fdata->intensity);
2736 return ERR_PTR(-EINVAL);
2737 }
2738 
2739 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2740 if (IS_ERR(fcp))
2741 return fcp;
2742 
/* track the number of format requests in flight on the start device */
2743 start_priv->count++;
2744 data = fcp->data;
2745 ccw = fcp->cpaddr;
2746 
/* build the DE/LO (or Prefix) part; the cdl bit does not matter here */
2747 switch (intensity & ~0x08) {
2748 case 0x00: /* Normal format. */
2749 if (use_prefix) {
2750 prefix(ccw++, (struct PFX_eckd_data *) data,
2751 fdata->start_unit, fdata->stop_unit,
2752 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2753 /* grant subsystem permission to format R0 */
2754 if (r0_perm)
2755 ((struct PFX_eckd_data *)data)
2756 ->define_extent.ga_extended |= 0x04;
2757 data += sizeof(struct PFX_eckd_data);
2758 } else {
2759 define_extent(ccw++, (struct DE_eckd_data *) data,
2760 fdata->start_unit, fdata->stop_unit,
2761 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2762 /* grant subsystem permission to format R0 */
2763 if (r0_perm)
2764 ((struct DE_eckd_data *) data)
2765 ->ga_extended |= 0x04;
2766 data += sizeof(struct DE_eckd_data);
2767 }
2768 ccw[-1].flags |= CCW_FLAG_CC;
2769 locate_record(ccw++, (struct LO_eckd_data *) data,
2770 fdata->start_unit, 0, rpt*nr_tracks,
2771 DASD_ECKD_CCW_WRITE_CKD, base,
2772 fdata->blksize);
2773 data += sizeof(struct LO_eckd_data);
2774 break;
2775 case 0x01: /* Write record zero + format track. */
2776 if (use_prefix) {
2777 prefix(ccw++, (struct PFX_eckd_data *) data,
2778 fdata->start_unit, fdata->stop_unit,
2779 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2780 base, startdev);
2781 data += sizeof(struct PFX_eckd_data);
2782 } else {
2783 define_extent(ccw++, (struct DE_eckd_data *) data,
2784 fdata->start_unit, fdata->stop_unit,
2785 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2786 data += sizeof(struct DE_eckd_data);
2787 }
2788 ccw[-1].flags |= CCW_FLAG_CC;
/* +1 record per track for the extra record zero write */
2789 locate_record(ccw++, (struct LO_eckd_data *) data,
2790 fdata->start_unit, 0, rpt * nr_tracks + 1,
2791 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2792 base->block->bp_block);
2793 data += sizeof(struct LO_eckd_data);
2794 break;
2795 case 0x04: /* Invalidate track. */
2796 if (use_prefix) {
2797 prefix(ccw++, (struct PFX_eckd_data *) data,
2798 fdata->start_unit, fdata->stop_unit,
2799 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2800 data += sizeof(struct PFX_eckd_data);
2801 } else {
2802 define_extent(ccw++, (struct DE_eckd_data *) data,
2803 fdata->start_unit, fdata->stop_unit,
2804 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2805 data += sizeof(struct DE_eckd_data);
2806 }
2807 ccw[-1].flags |= CCW_FLAG_CC;
2808 locate_record(ccw++, (struct LO_eckd_data *) data,
2809 fdata->start_unit, 0, 1,
2810 DASD_ECKD_CCW_WRITE_CKD, base, 8);
2811 data += sizeof(struct LO_eckd_data);
2812 break;
2813 }
2814 
/* per-track write CCWs with the eckd_count payloads built in-line */
2815 for (j = 0; j < nr_tracks; j++) {
2816 /* calculate cylinder and head for the current track */
2817 set_ch_t(&address,
2818 (fdata->start_unit + j) /
2819 base_priv->rdc_data.trk_per_cyl,
2820 (fdata->start_unit + j) %
2821 base_priv->rdc_data.trk_per_cyl);
2822 if (intensity & 0x01) { /* write record zero */
2823 ect = (struct eckd_count *) data;
2824 data += sizeof(struct eckd_count);
2825 ect->cyl = address.cyl;
2826 ect->head = address.head;
2827 ect->record = 0;
2828 ect->kl = 0;
2829 ect->dl = 8;
2830 ccw[-1].flags |= CCW_FLAG_CC;
2831 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2832 ccw->flags = CCW_FLAG_SLI;
2833 ccw->count = 8;
2834 ccw->cda = (__u32)(addr_t) ect;
2835 ccw++;
2836 }
2837 if ((intensity & ~0x08) & 0x04) { /* erase track */
2838 ect = (struct eckd_count *) data;
2839 data += sizeof(struct eckd_count);
2840 ect->cyl = address.cyl;
2841 ect->head = address.head;
2842 ect->record = 1;
2843 ect->kl = 0;
2844 ect->dl = 0;
2845 ccw[-1].flags |= CCW_FLAG_CC;
2846 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2847 ccw->flags = CCW_FLAG_SLI;
2848 ccw->count = 8;
2849 ccw->cda = (__u32)(addr_t) ect;
2850 } else { /* write remaining records */
2851 for (i = 0; i < rpt; i++) {
2852 ect = (struct eckd_count *) data;
2853 data += sizeof(struct eckd_count);
2854 ect->cyl = address.cyl;
2855 ect->head = address.head;
2856 ect->record = i + 1;
2857 ect->kl = 0;
2858 ect->dl = fdata->blksize;
2859 /*
2860 * Check for special tracks 0-1
2861 * when formatting CDL
2862 */
2863 if ((intensity & 0x08) &&
2864 address.cyl == 0 && address.head == 0) {
2865 if (i < 3) {
2866 ect->kl = 4;
2867 ect->dl = sizes_trk0[i] - 4;
2868 }
2869 }
2870 if ((intensity & 0x08) &&
2871 address.cyl == 0 && address.head == 1) {
2872 ect->kl = 44;
2873 ect->dl = LABEL_SIZE - 44;
2874 }
2875 ccw[-1].flags |= CCW_FLAG_CC;
2876 if (i != 0 || j == 0)
2877 ccw->cmd_code =
2878 DASD_ECKD_CCW_WRITE_CKD;
2879 else
2880 ccw->cmd_code =
2881 DASD_ECKD_CCW_WRITE_CKD_MT;
2882 ccw->flags = CCW_FLAG_SLI;
2883 ccw->count = 8;
2884 ccw->cda = (__u32)(addr_t) ect;
2885 ccw++;
2886 }
2887 }
2888 }
2889 
2890 fcp->startdev = startdev;
2891 fcp->memdev = startdev;
2892 fcp->basedev = base;
2893 fcp->retries = 256;
2894 fcp->expires = startdev->default_expires * HZ;
2895 fcp->buildclk = get_tod_clock();
2896 fcp->status = DASD_CQR_FILLED;
2897 
2898 return fcp;
2899 }
2900
2901 /*
2902 * Wrapper function to build a CCW request depending on input data
2903 */
2904 static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device * base,struct format_data_t * fdata,int enable_pav,int tpm,struct eckd_count * fmt_buffer,int rpt)2905 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2906 struct format_data_t *fdata, int enable_pav,
2907 int tpm, struct eckd_count *fmt_buffer, int rpt)
2908 {
2909 struct dasd_ccw_req *ccw_req;
2910
2911 if (!fmt_buffer) {
2912 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2913 } else {
2914 if (tpm)
2915 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2916 enable_pav,
2917 fmt_buffer, rpt);
2918 else
2919 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2920 fmt_buffer, rpt);
2921 }
2922
2923 return ccw_req;
2924 }
2925
2926 /*
2927 * Sanity checks on format_data
2928 */
dasd_eckd_format_sanity_checks(struct dasd_device * base,struct format_data_t * fdata)2929 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2930 struct format_data_t *fdata)
2931 {
2932 struct dasd_eckd_private *private = base->private;
2933
2934 if (fdata->start_unit >=
2935 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2936 dev_warn(&base->cdev->dev,
2937 "Start track number %u used in formatting is too big\n",
2938 fdata->start_unit);
2939 return -EINVAL;
2940 }
2941 if (fdata->stop_unit >=
2942 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2943 dev_warn(&base->cdev->dev,
2944 "Stop track number %u used in formatting is too big\n",
2945 fdata->stop_unit);
2946 return -EINVAL;
2947 }
2948 if (fdata->start_unit > fdata->stop_unit) {
2949 dev_warn(&base->cdev->dev,
2950 "Start track %u used in formatting exceeds end track\n",
2951 fdata->start_unit);
2952 return -EINVAL;
2953 }
2954 if (dasd_check_blocksize(fdata->blksize) != 0) {
2955 dev_warn(&base->cdev->dev,
2956 "The DASD cannot be formatted with block size %u\n",
2957 fdata->blksize);
2958 return -EINVAL;
2959 }
2960 return 0;
2961 }
2962
2963 /*
2964 * This function will process format_data originally coming from an IOCTL
2965 */
dasd_eckd_format_process_data(struct dasd_device * base,struct format_data_t * fdata,int enable_pav,int tpm,struct eckd_count * fmt_buffer,int rpt,struct irb * irb)2966 static int dasd_eckd_format_process_data(struct dasd_device *base,
2967 struct format_data_t *fdata,
2968 int enable_pav, int tpm,
2969 struct eckd_count *fmt_buffer, int rpt,
2970 struct irb *irb)
2971 {
2972 struct dasd_eckd_private *private = base->private;
2973 struct dasd_ccw_req *cqr, *n;
2974 struct list_head format_queue;
2975 struct dasd_device *device;
2976 char *sense = NULL;
2977 int old_start, old_stop, format_step;
2978 int step, retry;
2979 int rc;
2980 
2981 rc = dasd_eckd_format_sanity_checks(base, fdata);
2982 if (rc)
2983 return rc;
2984 
2985 INIT_LIST_HEAD(&format_queue);
2986 
/* remember the full range; fdata is restored before returning */
2987 old_start = fdata->start_unit;
2988 old_stop = fdata->stop_unit;
2989 
/* choose how many tracks a single request may cover */
2990 if (!tpm && fmt_buffer != NULL) {
2991 /* Command Mode / Format Check */
2992 format_step = 1;
2993 } else if (tpm && fmt_buffer != NULL) {
2994 /* Transport Mode / Format Check */
2995 format_step = DASD_CQR_MAX_CCW / rpt;
2996 } else {
2997 /* Normal Formatting */
2998 format_step = DASD_CQR_MAX_CCW /
2999 recs_per_track(&private->rdc_data, 0, fdata->blksize);
3000 }
3001 
3002 do {
3003 retry = 0;
/* slice the track range into format_step sized requests */
3004 while (fdata->start_unit <= old_stop) {
3005 step = fdata->stop_unit - fdata->start_unit + 1;
3006 if (step > format_step) {
3007 fdata->stop_unit =
3008 fdata->start_unit + format_step - 1;
3009 }
3010 
3011 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3012 enable_pav, tpm,
3013 fmt_buffer, rpt);
3014 if (IS_ERR(cqr)) {
3015 rc = PTR_ERR(cqr);
3016 if (rc == -ENOMEM) {
3017 if (list_empty(&format_queue))
3018 goto out;
3019 /*
3020 * not enough memory available, start
3021 * requests retry after first requests
3022 * were finished
3023 */
3024 retry = 1;
3025 break;
3026 }
3027 goto out_err;
3028 }
3029 list_add_tail(&cqr->blocklist, &format_queue);
3030 
/* advance the check buffer past the records just queued */
3031 if (fmt_buffer) {
3032 step = fdata->stop_unit - fdata->start_unit + 1;
3033 fmt_buffer += rpt * step;
3034 }
3035 fdata->start_unit = fdata->stop_unit + 1;
3036 fdata->stop_unit = old_stop;
3037 }
3038 
3039 rc = dasd_sleep_on_queue(&format_queue);
3040 
3041 out_err:
3042 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3043 device = cqr->startdev;
3044 private = device->private;
3045 
3046 if (cqr->status == DASD_CQR_FAILED) {
3047 /*
3048 * Only get sense data if called by format
3049 * check
3050 */
3051 if (fmt_buffer && irb) {
3052 sense = dasd_get_sense(&cqr->irb);
3053 memcpy(irb, &cqr->irb, sizeof(*irb));
3054 }
3055 rc = -EIO;
3056 }
3057 list_del_init(&cqr->blocklist);
3058 dasd_ffree_request(cqr, device);
3059 private->count--;
3060 }
3061 
3062 if (rc && rc != -EIO)
3063 goto out;
3064 if (rc == -EIO) {
3065 /*
3066 * In case fewer than the expected records are on the
3067 * track, we will most likely get a 'No Record Found'
3068 * error (in command mode) or a 'File Protected' error
3069 * (in transport mode). Those particular cases shouldn't
3070 * pass the -EIO to the IOCTL, therefore reset the rc
3071 * and continue.
3072 */
/* NOTE(review): sense points into cqr->irb, and cqr was released via
 * dasd_ffree_request() above; the copy in *irb would be the safer
 * source here - confirm the lifetime of freed format requests */
3073 if (sense &&
3074 (sense[1] & SNS1_NO_REC_FOUND ||
3075 sense[1] & SNS1_FILE_PROTECTED))
3076 retry = 1;
3077 else
3078 goto out;
3079 }
3080 
3081 } while (retry);
3082 
3083 out:
/* restore the caller's range before handing fdata back */
3084 fdata->start_unit = old_start;
3085 fdata->stop_unit = old_stop;
3086 
3087 return rc;
3088 }
3089
dasd_eckd_format_device(struct dasd_device * base,struct format_data_t * fdata,int enable_pav)3090 static int dasd_eckd_format_device(struct dasd_device *base,
3091 struct format_data_t *fdata, int enable_pav)
3092 {
3093 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3094 0, NULL);
3095 }
3096
/*
 * Mark the track in @to_format as being formatted and queue it on the
 * block's format list.
 *
 * Returns true if the caller must behave as if the track is (or may
 * have been) formatted concurrently: either the global track-format
 * counter changed after the request was started, or the track is
 * already on block->format_list.  Returns false after successfully
 * queueing @to_format under block->format_lock.
 */
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
				      struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct dasd_format_entry *format;
	unsigned long flags;
	bool rc = false;

	spin_lock_irqsave(&block->format_lock, flags);
	if (cqr->trkcount != atomic_read(&block->trkcount)) {
		/*
		 * The number of formatted tracks has changed after request
		 * start and we can not tell if the current track was involved.
		 * To avoid data corruption treat it as if the current track is
		 * involved
		 */
		rc = true;
		goto out;
	}
	/* is the track already being formatted by another thread? */
	list_for_each_entry(format, &block->format_list, list) {
		if (format->track == to_format->track) {
			rc = true;
			goto out;
		}
	}
	/* claim the track; visible to other threads under format_lock */
	list_add_tail(&to_format->list, &block->format_list);

out:
	spin_unlock_irqrestore(&block->format_lock, flags);
	return rc;
}
3128
/*
 * Remove a format entry from the block's format list and bump the
 * global track-format counter, so that requests that were in flight
 * while the format ran can detect it (see test_and_set_format_track()).
 */
static void clear_format_track(struct dasd_format_entry *format,
			       struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(&block->format_lock, flags);
	atomic_inc(&block->trkcount);
	list_del_init(&format->list);
	spin_unlock_irqrestore(&block->format_lock, flags);
}
3139
/*
 * Callback function to free ESE format requests.
 *
 * Runs when an on-demand format request built by dasd_eckd_ese_format()
 * has completed: it releases the track reservation taken via
 * test_and_set_format_track() and frees the request memory.
 */
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	struct dasd_eckd_private *private = device->private;
	struct dasd_format_entry *format = data;

	/* allow the track to be formatted/accessed again */
	clear_format_track(format, cqr->basedev->block);
	private->count--;
	dasd_ffree_request(cqr, device);
}
3153
3154 static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device * startdev,struct dasd_ccw_req * cqr,struct irb * irb)3155 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3156 struct irb *irb)
3157 {
3158 struct dasd_eckd_private *private;
3159 struct dasd_format_entry *format;
3160 struct format_data_t fdata;
3161 unsigned int recs_per_trk;
3162 struct dasd_ccw_req *fcqr;
3163 struct dasd_device *base;
3164 struct dasd_block *block;
3165 unsigned int blksize;
3166 struct request *req;
3167 sector_t first_trk;
3168 sector_t last_trk;
3169 sector_t curr_trk;
3170 int rc;
3171
3172 req = dasd_get_callback_data(cqr);
3173 block = cqr->block;
3174 base = block->base;
3175 private = base->private;
3176 blksize = block->bp_block;
3177 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3178 format = &startdev->format_entry;
3179
3180 first_trk = blk_rq_pos(req) >> block->s2b_shift;
3181 sector_div(first_trk, recs_per_trk);
3182 last_trk =
3183 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3184 sector_div(last_trk, recs_per_trk);
3185 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3186 if (rc)
3187 return ERR_PTR(rc);
3188
3189 if (curr_trk < first_trk || curr_trk > last_trk) {
3190 DBF_DEV_EVENT(DBF_WARNING, startdev,
3191 "ESE error track %llu not within range %llu - %llu\n",
3192 curr_trk, first_trk, last_trk);
3193 return ERR_PTR(-EINVAL);
3194 }
3195 format->track = curr_trk;
3196 /* test if track is already in formatting by another thread */
3197 if (test_and_set_format_track(format, cqr)) {
3198 /* this is no real error so do not count down retries */
3199 cqr->retries++;
3200 return ERR_PTR(-EEXIST);
3201 }
3202
3203 fdata.start_unit = curr_trk;
3204 fdata.stop_unit = curr_trk;
3205 fdata.blksize = blksize;
3206 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3207
3208 rc = dasd_eckd_format_sanity_checks(base, &fdata);
3209 if (rc)
3210 return ERR_PTR(-EINVAL);
3211
3212 /*
3213 * We're building the request with PAV disabled as we're reusing
3214 * the former startdev.
3215 */
3216 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3217 if (IS_ERR(fcqr))
3218 return fcqr;
3219
3220 fcqr->callback = dasd_eckd_ese_format_cb;
3221 fcqr->callback_data = (void *) format;
3222
3223 return fcqr;
3224 }
3225
/*
 * When data is read from an unformatted area of an ESE volume, this function
 * returns zeroed data and thereby mimics a read of zero data.
 *
 * The first unformatted track is the one that got the NRF error, the address is
 * encoded in the sense data.
 *
 * All tracks before have returned valid data and should not be touched.
 * All tracks after the unformatted track might be formatted or not. This is
 * currently not known, remember the processed data and return the remainder of
 * the request to the blocklayer in __dasd_cleanup_cqr().
 */
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_eckd_private *private;
	sector_t first_trk, last_trk;
	sector_t first_blk, last_blk;
	unsigned int blksize, off;
	unsigned int recs_per_trk;
	struct dasd_device *base;
	struct req_iterator iter;
	struct dasd_block *block;
	unsigned int skip_block;
	unsigned int blk_count;
	struct request *req;
	struct bio_vec bv;
	sector_t curr_trk;
	sector_t end_blk;
	char *dst;
	int rc;

	req = (struct request *) cqr->callback_data;
	base = cqr->block->base;
	blksize = base->block->bp_block;
	block = cqr->block;
	private = base->private;
	skip_block = 0;
	blk_count = 0;

	/* translate the request's sector range into block/track numbers */
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk = last_blk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	/* the failing (unformatted) track is encoded in the sense data */
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return rc;

	/* sanity check if the current track from sense data is valid */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, base,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return -EINVAL;
	}

	/*
	 * if not the first track got the NRF error we have to skip over valid
	 * blocks
	 */
	if (curr_trk != first_trk)
		skip_block = curr_trk * recs_per_trk - first_blk;

	/* we have no information beyond the current track */
	end_blk = (curr_trk + 1) * recs_per_trk;

	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			if (first_blk + blk_count >= end_blk) {
				/*
				 * Record how much was handled so that the
				 * remainder of the request can be returned
				 * to the block layer by __dasd_cleanup_cqr().
				 */
				cqr->proc_bytes = blk_count * blksize;
				return 0;
			}
			if (dst && !skip_block)
				/* unformatted area reads back as zeroes */
				memset(dst, 0, blksize);
			else
				/* still-valid block (or no data pointer): skip */
				skip_block--;
			dst += blksize;
			blk_count++;
		}
	}
	return 0;
}
3310
3311 /*
3312 * Helper function to count consecutive records of a single track.
3313 */
dasd_eckd_count_records(struct eckd_count * fmt_buffer,int start,int max)3314 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3315 int max)
3316 {
3317 int head;
3318 int i;
3319
3320 head = fmt_buffer[start].head;
3321
3322 /*
3323 * There are 3 conditions where we stop counting:
3324 * - if data reoccurs (same head and record may reoccur), which may
3325 * happen due to the way DASD_ECKD_CCW_READ_COUNT works
3326 * - when the head changes, because we're iterating over several tracks
3327 * then (DASD_ECKD_CCW_READ_COUNT_MT)
3328 * - when we've reached the end of sensible data in the buffer (the
3329 * record will be 0 then)
3330 */
3331 for (i = start; i < max; i++) {
3332 if (i > start) {
3333 if ((fmt_buffer[i].head == head &&
3334 fmt_buffer[i].record == 1) ||
3335 fmt_buffer[i].head != head ||
3336 fmt_buffer[i].record == 0)
3337 break;
3338 }
3339 }
3340
3341 return i - start;
3342 }
3343
/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information, depending on the error.
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;
	int max_entries;
	int count = 0;
	int trkcount;
	int blksize;
	int pos = 0;
	int i, j;
	int kl;

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Calculate the correct next starting position in the buffer */
		if (tpm) {
			/*
			 * In transport mode the buffer may contain padding
			 * entries (record and dl both 0); skip over them.
			 *
			 * NOTE(review): fmt_buffer[pos] is read before pos is
			 * compared against max_entries, and the post-increment
			 * only breaks once pos has already gone past the
			 * limit — looks like a potential read one past the
			 * buffer end when the tail is all zeroes; confirm
			 * against the buffer sizing in the caller.
			 */
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				if (pos++ > max_entries)
					break;
			}
		} else {
			/* command mode: each track occupies rpt_max slots */
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Calculate the expected geo values for the current track */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count and check number of records */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			break;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			break;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Set special values when checking CDL formatted
			 * devices.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check blocksize */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check if key length is 0 */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check if record_id is correct */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * In case of no errors, we need to decrease by one
	 * to get the correct positions.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	/* report where the evaluation stopped (track, record, attributes) */
	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
3453
/*
 * Check the format of a range of tracks of a DASD.
 *
 * Reads record-count data for the range given in cdata->expect into a
 * temporary buffer and evaluates it against the expected format.
 * Returns 0 on success (the verdict is stored in cdata->result), or a
 * negative errno if the check could not be performed.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Get maximum and expected amount of records per track */
	/* worst case: 512-byte blocks; +1 extra slot — presumably headroom
	 * for an additional count entry, TODO confirm */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);

	/* GFP_DMA: the buffer is addressed by the channel subsystem */
	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;

	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
3527
dasd_eckd_handle_terminated_request(struct dasd_ccw_req * cqr)3528 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3529 {
3530 if (cqr->retries < 0) {
3531 cqr->status = DASD_CQR_FAILED;
3532 return;
3533 }
3534 cqr->status = DASD_CQR_FILLED;
3535 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3536 dasd_eckd_reset_ccw_to_base_io(cqr);
3537 cqr->startdev = cqr->block->base;
3538 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3539 }
3540 };
3541
3542 static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)3543 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3544 {
3545 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3546 struct ccw_device *cdev = device->cdev;
3547
3548 switch (cdev->id.cu_type) {
3549 case 0x3990:
3550 case 0x2105:
3551 case 0x2107:
3552 case 0x1750:
3553 return dasd_3990_erp_action;
3554 case 0x9343:
3555 case 0x3880:
3556 default:
3557 return dasd_default_erp_action;
3558 }
3559 }
3560
3561 static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)3562 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3563 {
3564 return dasd_default_erp_postaction;
3565 }
3566
/*
 * Inspect an interrupt for unsolicited device conditions: state change
 * pending, summary unit check, service information messages (SIM) and
 * loss of a device reservation.  Called for interrupts that do not
 * belong to a normally completing request.
 */
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		/* DASD_FLAG_SUC serializes SUC handling per device */
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		/*
		 * NOTE(review): sense was already fetched and checked
		 * non-NULL above; this re-fetch of the same irb makes the
		 * !sense branch below unreachable.
		 */
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;

		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		/* hold a reference for the worker; dropped if not scheduled */
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
3645
dasd_eckd_ras_sanity_checks(struct dasd_device * device,unsigned int first_trk,unsigned int last_trk)3646 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3647 unsigned int first_trk,
3648 unsigned int last_trk)
3649 {
3650 struct dasd_eckd_private *private = device->private;
3651 unsigned int trks_per_vol;
3652 int rc = 0;
3653
3654 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3655
3656 if (first_trk >= trks_per_vol) {
3657 dev_warn(&device->cdev->dev,
3658 "Start track number %u used in the space release command is too big\n",
3659 first_trk);
3660 rc = -EINVAL;
3661 } else if (last_trk >= trks_per_vol) {
3662 dev_warn(&device->cdev->dev,
3663 "Stop track number %u used in the space release command is too big\n",
3664 last_trk);
3665 rc = -EINVAL;
3666 } else if (first_trk > last_trk) {
3667 dev_warn(&device->cdev->dev,
3668 "Start track %u used in the space release command exceeds the end track\n",
3669 first_trk);
3670 rc = -EINVAL;
3671 }
3672 return rc;
3673 }
3674
/*
 * Helper function to count the amount of involved extents within a given range
 * with extent alignment in mind.
 *
 * @from:         first track of the range (inclusive)
 * @to:           last track of the range (inclusive)
 * @trks_per_ext: number of tracks per extent
 *
 * An unaligned start contributes one partial extent, followed by zero
 * or more full extents, followed by an optional trailing partial
 * extent.
 *
 * Fix: the previous version compared the range-relative offset set by
 * the first-partial branch (cur_pos = tmp - from + 1) against the
 * absolute track number 'to', so e.g. (from=12, to=19, ext=10)
 * returned 2 instead of 1 and made dasd_eckd_dso_ras() emit a bogus
 * trailing extent with end < begin.  We now consistently track the
 * first not-yet-counted track in 'next'.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	unsigned int next = from;	/* first track not yet accounted for */
	unsigned int tmp;
	int count = 0;

	if (from == to)
		return 1;

	/* Count first partial extent */
	if (from % trks_per_ext != 0) {
		tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
		if (tmp > to)
			tmp = to;
		next = tmp + 1;
		count++;
	}
	/* Count full extents; next is extent-aligned here */
	if (next <= to && to - next + 1 >= trks_per_ext) {
		count += (to - next + 1) / trks_per_ext;
		next += ((to - next + 1) / trks_per_ext) * trks_per_ext;
	}
	/* Count last partial extent */
	if (next <= to)
		count++;

	return count;
}
3708
/*
 * Release allocated space for a given range or an entire volume.
 *
 * Builds a Define Subsystem Operation (DSO) request with the Release
 * Allocated Space (RAS) order.  With @by_extent set, the track range
 * [first_trk, last_trk] is split into extent-aligned sub-ranges that
 * are appended to the RAS data; otherwise the whole volume is
 * released.  @req, if non-NULL, supplies the block-layer PDU used for
 * request allocation.
 */
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
		  struct request *req, unsigned int first_trk,
		  unsigned int last_trk, int by_extent)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_dso_ras_ext_range *ras_range;
	struct dasd_rssd_features *features;
	struct dasd_dso_ras_data *ras_data;
	u16 heads, beg_head, end_head;
	int cur_to_trk, cur_from_trk;
	struct dasd_ccw_req *cqr;
	u32 beg_cyl, end_cyl;
	struct ccw1 *ccw;
	int trks_per_ext;
	size_t ras_size;
	size_t size;
	int nr_exts;
	void *rq;
	int i;

	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
		return ERR_PTR(-EINVAL);

	rq = req ? blk_mq_rq_to_pdu(req) : NULL;

	features = &private->features;

	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
	nr_exts = 0;
	if (by_extent)
		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
	/* RAS data is followed by one range descriptor per extent */
	ras_size = sizeof(*ras_data);
	size = ras_size + (nr_exts * sizeof(*ras_range));

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RAS request");
		return cqr;
	}

	ras_data = cqr->data;
	memset(ras_data, 0, size);

	ras_data->order = DSO_ORDER_RAS;
	ras_data->flags.vol_type = 0; /* CKD volume */
	/* Release specified extents or entire volume */
	ras_data->op_flags.by_extent = by_extent;
	/*
	 * This bit guarantees initialisation of tracks within an extent that is
	 * not fully specified, but is only supported with a certain feature
	 * subset.
	 */
	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
	ras_data->lss = private->ned->ID;
	ras_data->dev_addr = private->ned->unit_addr;
	ras_data->nr_exts = nr_exts;

	if (by_extent) {
		heads = private->rdc_data.trk_per_cyl;
		/* first sub-range: from first_trk to the end of its extent */
		cur_from_trk = first_trk;
		cur_to_trk = first_trk + trks_per_ext -
			(first_trk % trks_per_ext) - 1;
		if (cur_to_trk > last_trk)
			cur_to_trk = last_trk;
		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);

		/*
		 * NOTE(review): the loop trusts nr_exts from count_exts();
		 * if the count ever exceeds the number of sub-ranges the
		 * range actually needs, the tail iteration would emit a
		 * descriptor with end < begin — verify the two stay in sync.
		 */
		for (i = 0; i < nr_exts; i++) {
			beg_cyl = cur_from_trk / heads;
			beg_head = cur_from_trk % heads;
			end_cyl = cur_to_trk / heads;
			end_head = cur_to_trk % heads;

			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
			set_ch_t(&ras_range->end_ext, end_cyl, end_head);

			/* advance to the next (extent-aligned) sub-range */
			cur_from_trk = cur_to_trk + 1;
			cur_to_trk = cur_from_trk + trks_per_ext - 1;
			if (cur_to_trk > last_trk)
				cur_to_trk = last_trk;
			ras_range++;
		}
	}

	ccw = cqr->cpaddr;
	ccw->cda = (__u32)(addr_t)cqr->data;
	ccw->cmd_code = DASD_ECKD_CCW_DSO;
	ccw->count = size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = block;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
3812
dasd_eckd_release_space_full(struct dasd_device * device)3813 static int dasd_eckd_release_space_full(struct dasd_device *device)
3814 {
3815 struct dasd_ccw_req *cqr;
3816 int rc;
3817
3818 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3819 if (IS_ERR(cqr))
3820 return PTR_ERR(cqr);
3821
3822 rc = dasd_sleep_on_interruptible(cqr);
3823
3824 dasd_sfree_request(cqr, cqr->memdev);
3825
3826 return rc;
3827 }
3828
/*
 * Release the allocated space of the track range [from, to] by extent.
 *
 * The range is split into chunks that respect the device's limit of
 * DASD_ECKD_RAS_EXTS_MAX extents per request; one RAS request per
 * chunk is queued and the queue is processed synchronously.  On
 * -ENOMEM with requests already queued, those are processed first and
 * the remainder is retried.
 */
static int dasd_eckd_release_space_trks(struct dasd_device *device,
					unsigned int from, unsigned int to)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block = device->block;
	struct dasd_ccw_req *cqr, *n;
	struct list_head ras_queue;
	unsigned int device_exts;
	int trks_per_ext;
	int stop, step;
	int cur_pos;
	int rc = 0;
	int retry;

	INIT_LIST_HEAD(&ras_queue);

	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;

	/* Make sure device limits are not exceeded */
	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
	cur_pos = from;

	do {
		retry = 0;
		/*
		 * NOTE(review): with from == to this loop never runs and the
		 * function returns 0 without releasing anything — confirm
		 * whether a single-track release is expected to be a no-op.
		 */
		while (cur_pos < to) {
			/* chunk ends on an extent boundary (or at 'to') */
			stop = cur_pos + step -
				((cur_pos + step) % trks_per_ext) - 1;
			if (stop > to)
				stop = to;

			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&ras_queue))
						goto out;
					/* process what we have, then retry */
					retry = 1;
					break;
				}
				goto err_out;
			}

			/* queue_lock guards the blocklist against requeueing */
			spin_lock_irq(&block->queue_lock);
			list_add_tail(&cqr->blocklist, &ras_queue);
			spin_unlock_irq(&block->queue_lock);
			cur_pos = stop + 1;
		}

		rc = dasd_sleep_on_queue_interruptible(&ras_queue);

err_out:
		/* free all requests, whether they completed or not */
		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			spin_lock_irq(&block->queue_lock);
			list_del_init(&cqr->blocklist);
			spin_unlock_irq(&block->queue_lock);
			dasd_sfree_request(cqr, device);
			private->count--;
		}
	} while (retry);

out:
	return rc;
}
3896
dasd_eckd_release_space(struct dasd_device * device,struct format_data_t * rdata)3897 static int dasd_eckd_release_space(struct dasd_device *device,
3898 struct format_data_t *rdata)
3899 {
3900 if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3901 return dasd_eckd_release_space_full(device);
3902 else if (rdata->intensity == 0)
3903 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3904 rdata->stop_unit);
3905 else
3906 return -EINVAL;
3907 }
3908
dasd_eckd_build_cp_cmd_single(struct dasd_device * startdev,struct dasd_block * block,struct request * req,sector_t first_rec,sector_t last_rec,sector_t first_trk,sector_t last_trk,unsigned int first_offs,unsigned int last_offs,unsigned int blk_per_trk,unsigned int blksize)3909 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3910 struct dasd_device *startdev,
3911 struct dasd_block *block,
3912 struct request *req,
3913 sector_t first_rec,
3914 sector_t last_rec,
3915 sector_t first_trk,
3916 sector_t last_trk,
3917 unsigned int first_offs,
3918 unsigned int last_offs,
3919 unsigned int blk_per_trk,
3920 unsigned int blksize)
3921 {
3922 struct dasd_eckd_private *private;
3923 unsigned long *idaws;
3924 struct LO_eckd_data *LO_data;
3925 struct dasd_ccw_req *cqr;
3926 struct ccw1 *ccw;
3927 struct req_iterator iter;
3928 struct bio_vec bv;
3929 char *dst;
3930 unsigned int off;
3931 int count, cidaw, cplength, datasize;
3932 sector_t recid;
3933 unsigned char cmd, rcmd;
3934 int use_prefix;
3935 struct dasd_device *basedev;
3936
3937 basedev = block->base;
3938 private = basedev->private;
3939 if (rq_data_dir(req) == READ)
3940 cmd = DASD_ECKD_CCW_READ_MT;
3941 else if (rq_data_dir(req) == WRITE)
3942 cmd = DASD_ECKD_CCW_WRITE_MT;
3943 else
3944 return ERR_PTR(-EINVAL);
3945
3946 /* Check struct bio and count the number of blocks for the request. */
3947 count = 0;
3948 cidaw = 0;
3949 rq_for_each_segment(bv, req, iter) {
3950 if (bv.bv_len & (blksize - 1))
3951 /* Eckd can only do full blocks. */
3952 return ERR_PTR(-EINVAL);
3953 count += bv.bv_len >> (block->s2b_shift + 9);
3954 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3955 cidaw += bv.bv_len >> (block->s2b_shift + 9);
3956 }
3957 /* Paranoia. */
3958 if (count != last_rec - first_rec + 1)
3959 return ERR_PTR(-EINVAL);
3960
3961 /* use the prefix command if available */
3962 use_prefix = private->features.feature[8] & 0x01;
3963 if (use_prefix) {
3964 /* 1x prefix + number of blocks */
3965 cplength = 2 + count;
3966 /* 1x prefix + cidaws*sizeof(long) */
3967 datasize = sizeof(struct PFX_eckd_data) +
3968 sizeof(struct LO_eckd_data) +
3969 cidaw * sizeof(unsigned long);
3970 } else {
3971 /* 1x define extent + 1x locate record + number of blocks */
3972 cplength = 2 + count;
3973 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3974 datasize = sizeof(struct DE_eckd_data) +
3975 sizeof(struct LO_eckd_data) +
3976 cidaw * sizeof(unsigned long);
3977 }
3978 /* Find out the number of additional locate record ccws for cdl. */
3979 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3980 if (last_rec >= 2*blk_per_trk)
3981 count = 2*blk_per_trk - first_rec;
3982 cplength += count;
3983 datasize += count*sizeof(struct LO_eckd_data);
3984 }
3985 /* Allocate the ccw request. */
3986 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3987 startdev, blk_mq_rq_to_pdu(req));
3988 if (IS_ERR(cqr))
3989 return cqr;
3990 ccw = cqr->cpaddr;
3991 /* First ccw is define extent or prefix. */
3992 if (use_prefix) {
3993 if (prefix(ccw++, cqr->data, first_trk,
3994 last_trk, cmd, basedev, startdev) == -EAGAIN) {
3995 /* Clock not in sync and XRC is enabled.
3996 * Try again later.
3997 */
3998 dasd_sfree_request(cqr, startdev);
3999 return ERR_PTR(-EAGAIN);
4000 }
4001 idaws = (unsigned long *) (cqr->data +
4002 sizeof(struct PFX_eckd_data));
4003 } else {
4004 if (define_extent(ccw++, cqr->data, first_trk,
4005 last_trk, cmd, basedev, 0) == -EAGAIN) {
4006 /* Clock not in sync and XRC is enabled.
4007 * Try again later.
4008 */
4009 dasd_sfree_request(cqr, startdev);
4010 return ERR_PTR(-EAGAIN);
4011 }
4012 idaws = (unsigned long *) (cqr->data +
4013 sizeof(struct DE_eckd_data));
4014 }
4015 /* Build locate_record+read/write/ccws. */
4016 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
4017 recid = first_rec;
4018 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
4019 /* Only standard blocks so there is just one locate record. */
4020 ccw[-1].flags |= CCW_FLAG_CC;
4021 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
4022 last_rec - recid + 1, cmd, basedev, blksize);
4023 }
4024 rq_for_each_segment(bv, req, iter) {
4025 dst = bvec_virt(&bv);
4026 if (dasd_page_cache) {
4027 char *copy = kmem_cache_alloc(dasd_page_cache,
4028 GFP_DMA | __GFP_NOWARN);
4029 if (copy && rq_data_dir(req) == WRITE)
4030 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4031 if (copy)
4032 dst = copy + bv.bv_offset;
4033 }
4034 for (off = 0; off < bv.bv_len; off += blksize) {
4035 sector_t trkid = recid;
4036 unsigned int recoffs = sector_div(trkid, blk_per_trk);
4037 rcmd = cmd;
4038 count = blksize;
4039 /* Locate record for cdl special block ? */
4040 if (private->uses_cdl && recid < 2*blk_per_trk) {
4041 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4042 rcmd |= 0x8;
4043 count = dasd_eckd_cdl_reclen(recid);
4044 if (count < blksize &&
4045 rq_data_dir(req) == READ)
4046 memset(dst + count, 0xe5,
4047 blksize - count);
4048 }
4049 ccw[-1].flags |= CCW_FLAG_CC;
4050 locate_record(ccw++, LO_data++,
4051 trkid, recoffs + 1,
4052 1, rcmd, basedev, count);
4053 }
4054 /* Locate record for standard blocks ? */
4055 if (private->uses_cdl && recid == 2*blk_per_trk) {
4056 ccw[-1].flags |= CCW_FLAG_CC;
4057 locate_record(ccw++, LO_data++,
4058 trkid, recoffs + 1,
4059 last_rec - recid + 1,
4060 cmd, basedev, count);
4061 }
4062 /* Read/write ccw. */
4063 ccw[-1].flags |= CCW_FLAG_CC;
4064 ccw->cmd_code = rcmd;
4065 ccw->count = count;
4066 if (idal_is_needed(dst, blksize)) {
4067 ccw->cda = (__u32)(addr_t) idaws;
4068 ccw->flags = CCW_FLAG_IDA;
4069 idaws = idal_create_words(idaws, dst, blksize);
4070 } else {
4071 ccw->cda = (__u32)(addr_t) dst;
4072 ccw->flags = 0;
4073 }
4074 ccw++;
4075 dst += blksize;
4076 recid++;
4077 }
4078 }
4079 if (blk_noretry_request(req) ||
4080 block->base->features & DASD_FEATURE_FAILFAST)
4081 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4082 cqr->startdev = startdev;
4083 cqr->memdev = startdev;
4084 cqr->block = block;
4085 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4086 cqr->lpm = dasd_path_get_ppm(startdev);
4087 cqr->retries = startdev->default_retries;
4088 cqr->buildclk = get_tod_clock();
4089 cqr->status = DASD_CQR_FILLED;
4090
4091 /* Set flags to suppress output for expected errors */
4092 if (dasd_eckd_is_ese(basedev)) {
4093 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4094 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4095 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4096 }
4097
4098 return cqr;
4099 }
4100
/*
 * Build a command mode channel program that transfers whole tracks with
 * the READ/WRITE TRACK DATA commands: one PFX ccw followed by one data
 * ccw per track, each data ccw pointing to a list of IDAWs that covers
 * the request payload for that track.
 *
 * Returns the filled cqr, ERR_PTR(-EAGAIN) if the clock is not in sync
 * while XRC is enabled, ERR_PTR(-ERANGE) if the bio memory layout cannot
 * be mapped to IDAWs, or the allocation error from dasd_smalloc_request.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				/* Start a new data ccw that covers all
				 * remaining records on this track. */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				/* segments are not contiguous in memory */
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
4276
/*
 * Fill in a PFX (prefix with LRE) data area for a transport mode channel
 * program and add it as the first DCW of the given itcw.
 *
 * @trk/@totrk:	first and last track of the extent
 * @cmd:	the track data command the DCWs will carry
 * @rec_on_trk:	first record on the first track (used for the RPS sector
 *		estimate below); 0 disables the sector calculation
 * @count:	record count for the locate record entry
 * @tlf:	transfer length factor (bytes on the last track)
 *
 * Returns 0 on success, the error from set_timestamp() if the clock is
 * not usable, or the PTR_ERR of itcw_add_dcw on failure. BUGs on an
 * unknown @cmd.
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;


	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	/* command dependent define extent / locate record settings */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	/* translate linear track numbers into cylinder/head addresses */
	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/* Estimate the angular (RPS) sector position of the record
		 * from the device-type dependent track geometry formulas. */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
4432
/*
 * Build a transport mode (ITCW/TIDAW) channel program that transfers
 * whole tracks with READ/WRITE TRACK DATA. The request payload is
 * described by one TIDAW per bio segment; for writes, a segment that
 * crosses a track boundary is split so that the CBC flag can be set on
 * the last TIDAW of each track.
 *
 * Returns the filled cqr, ERR_PTR(-EAGAIN) if the clock is not in sync
 * while XRC is enabled, ERR_PTR(-EINVAL) on itcw construction failure,
 * or the allocation error from dasd_smalloc_request.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* trackbased I/O needs address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					/* byte budget of the current track */
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;
				/* We need to end the tidaw at track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		/* reads map one tidaw per segment, no CBC handling needed */
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}
4606
/*
 * Build the channel program for a block layer request, choosing the best
 * available flavor:
 *  - transport mode track I/O (tpm) if the data fits fcx_max_data and
 *    either the device supports multitrack fcx or only one track is
 *    involved,
 *  - command mode track I/O if prefix and the track data commands are
 *    supported for this direction,
 *  - otherwise (including CDL special blocks and dasd_page_cache use)
 *    the single-record command mode program.
 * Track-based builders failing with anything but -EAGAIN/-ENOMEM fall
 * back to the single-record variant.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write request add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
4686
/*
 * Build a channel program for raw track access (DASD_FEATURE_USERAW):
 * whole tracks are transferred with READ TRACK / WRITE FULL TRACK, one
 * data ccw per track. Each track maps to 64k of memory
 * (DASD_RAW_BLOCK_PER_TRACK pages). Misaligned reads are padded with
 * rawpadpage dummy pages; misaligned writes are rejected with -EINVAL.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be multiple of 64k and on 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	/* first ccw(s): prefix, or define extent + locate record */
	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, 0, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, 0,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		/* front padding: first data ccw starts with dummy pages */
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			/* start a new data ccw for the next track */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	/* tail padding: complete the last track with dummy pages */
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
4837
4838
/*
 * Free a command mode single-record channel program. If dasd_page_cache
 * bounce buffers were used at build time, copy read data back into the
 * request's bio pages and release the cache pages. The ccw walk below
 * must mirror the layout produced by dasd_eckd_build_cp_cmd_single
 * (define extent/prefix, optional locate records for CDL, one data ccw
 * per block).
 * Returns 1 if the request completed with DASD_CQR_DONE, 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* resolve the data address of the ccw (may
				 * be indirect via an IDA list) */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* a bounce buffer was used */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
						(void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
4890
4891 /*
4892 * Modify ccw/tcw in cqr so it can be started on a base device.
4893 *
4894 * Note that this is not enough to restart the cqr!
4895 * Either reset cqr->startdev as well (summary unit check handling)
4896 * or restart via separate cqr (as in ERP handling).
4897 */
dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req * cqr)4898 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4899 {
4900 struct ccw1 *ccw;
4901 struct PFX_eckd_data *pfxdata;
4902 struct tcw *tcw;
4903 struct tccb *tccb;
4904 struct dcw *dcw;
4905
4906 if (cqr->cpmode == 1) {
4907 tcw = cqr->cpaddr;
4908 tccb = tcw_get_tccb(tcw);
4909 dcw = (struct dcw *)&tccb->tca[0];
4910 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4911 pfxdata->validity.verify_base = 0;
4912 pfxdata->validity.hyper_pav = 0;
4913 } else {
4914 ccw = cqr->cpaddr;
4915 pfxdata = cqr->data;
4916 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4917 pfxdata->validity.verify_base = 0;
4918 pfxdata->validity.hyper_pav = 0;
4919 }
4920 }
4921 }
4922
4923 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4924
/*
 * Build a channel program on the best start device for @base: a PAV
 * alias if one is available, otherwise the base device itself. Tracks
 * the number of in-flight requests per start device in private->count
 * and refuses new requests with -EBUSY once DASD_ECKD_CHANQ_MAX_SIZE
 * is reached.
 */
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = startdev->private;
	/* NOTE(review): best-effort throttle read outside the lock;
	 * count itself is only modified under the ccw device lock below. */
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	if ((base->features & DASD_FEATURE_USERAW))
		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
	else
		cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}
4952
dasd_eckd_free_alias_cp(struct dasd_ccw_req * cqr,struct request * req)4953 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4954 struct request *req)
4955 {
4956 struct dasd_eckd_private *private;
4957 unsigned long flags;
4958
4959 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4960 private = cqr->memdev->private;
4961 private->count--;
4962 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4963 return dasd_eckd_free_cp(cqr, req);
4964 }
4965
4966 static int
dasd_eckd_fill_info(struct dasd_device * device,struct dasd_information2_t * info)4967 dasd_eckd_fill_info(struct dasd_device * device,
4968 struct dasd_information2_t * info)
4969 {
4970 struct dasd_eckd_private *private = device->private;
4971
4972 info->label_block = 2;
4973 info->FBA_layout = private->uses_cdl ? 0 : 1;
4974 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4975 info->characteristics_size = sizeof(private->rdc_data);
4976 memcpy(info->characteristics, &private->rdc_data,
4977 sizeof(private->rdc_data));
4978 info->confdata_size = min((unsigned long)private->conf_len,
4979 sizeof(info->configuration_data));
4980 memcpy(info->configuration_data, private->conf_data,
4981 info->confdata_size);
4982 return 0;
4983 }
4984
4985 /*
4986 * SECTION: ioctl functions for eckd devices.
4987 */
4988
/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* Allocation failed (e.g. memory pressure): fall back to
		 * the preallocated static request, serialized by
		 * dasd_reserve_mutex so only one user at a time. */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* single RELEASE ccw with a 32 byte data area */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5042
5043 /*
5044 * Reserve device ioctl.
5045 * Options are set to 'synchronous wait for interrupt' and
5046 * 'timeout the request'. This leads to a terminate IO if
5047 * the interrupt is outstanding for a certain time.
5048 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;	/* set when the static fallback request is in use */

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/*
		 * Allocation failed - fall back to the statically
		 * preallocated dasd_reserve_req, serialized by
		 * dasd_reserve_mutex (held until the request completes).
		 */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* single RESERVE CCW with a 32 byte data area */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	/* no complex ERP; fail fast so the ioctl cannot hang indefinitely */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		/* remember that we hold the reservation */
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5097
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;	/* set when the static fallback request is in use */

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/*
		 * Allocation failed - fall back to the statically
		 * preallocated dasd_reserve_req, serialized by
		 * dasd_reserve_mutex (held until the request completes).
		 */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* single unconditional-reserve (steal lock) CCW, 32 byte data area */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	/* no complex ERP; fail fast so the ioctl cannot hang indefinitely */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		/* breaking the reservation leaves it assigned to us */
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5151
5152 /*
5153 * SNID - Sense Path Group ID
5154 * This ioctl may be used in situations where I/O is stalled due to
5155 * a reserve, so if the normal dasd_smalloc_request fails, we use the
5156 * preallocated dasd_reserve_req.
5157 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;	/* set when the static fallback request is in use */
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		/*
		 * Allocation failed (e.g. I/O stalled by a reservation) -
		 * fall back to the preallocated dasd_reserve_req under
		 * dasd_reserve_mutex.
		 */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* single SNID CCW; 12 bytes of sense path group ID data */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	/* NOTE(review): ALLOW_SLOCK presumably lets this request run even
	 * while the device is reserved elsewhere - confirm in dasd_int.h */
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* restrict the I/O to the caller supplied path mask */
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		/* hand the returned SNID data back to user space */
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5220
5221 /*
5222 * Read performance statistics
5223 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* two chained CCWs (PSF + RSSD), one data area for both payloads */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;	/* no retries for the statistics query */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		/* on success copy the statistics payload to user space */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5283
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
5288 static int
dasd_eckd_get_attrib(struct dasd_device * device,void __user * argp)5289 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5290 {
5291 struct dasd_eckd_private *private = device->private;
5292 struct attrib_data_t attrib = private->attrib;
5293 int rc;
5294
5295 if (!capable(CAP_SYS_ADMIN))
5296 return -EACCES;
5297 if (!argp)
5298 return -EINVAL;
5299
5300 rc = 0;
5301 if (copy_to_user(argp, (long *) &attrib,
5302 sizeof(struct attrib_data_t)))
5303 rc = -EFAULT;
5304
5305 return rc;
5306 }
5307
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
5312 static int
dasd_eckd_set_attrib(struct dasd_device * device,void __user * argp)5313 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5314 {
5315 struct dasd_eckd_private *private = device->private;
5316 struct attrib_data_t attrib;
5317
5318 if (!capable(CAP_SYS_ADMIN))
5319 return -EACCES;
5320 if (!argp)
5321 return -EINVAL;
5322
5323 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5324 return -EFAULT;
5325 private->attrib = attrib;
5326
5327 dev_info(&device->cdev->dev,
5328 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5329 private->attrib.operation, private->attrib.nr_cyl);
5330 return 0;
5331 }
5332
5333 /*
5334 * Issue syscall I/O to EMC Symmetrix array.
5335 * CCWs are PSF and RSSD
5336 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;	/* first two PSF bytes, kept for debug log */
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area (GFP_DMA: must be addressable by the CCWs) */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw - chained to the following RSSD */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw - SLI: result may be shorter than the buffer */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	/* hand the RSSD result back to the caller */
	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
5443
5444 static int
dasd_eckd_ioctl(struct dasd_block * block,unsigned int cmd,void __user * argp)5445 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5446 {
5447 struct dasd_device *device = block->base;
5448
5449 switch (cmd) {
5450 case BIODASDGATTR:
5451 return dasd_eckd_get_attrib(device, argp);
5452 case BIODASDSATTR:
5453 return dasd_eckd_set_attrib(device, argp);
5454 case BIODASDPSRD:
5455 return dasd_eckd_performance(device, argp);
5456 case BIODASDRLSE:
5457 return dasd_eckd_release(device);
5458 case BIODASDRSRV:
5459 return dasd_eckd_reserve(device);
5460 case BIODASDSLCK:
5461 return dasd_eckd_steal_lock(device);
5462 case BIODASDSNID:
5463 return dasd_eckd_snid(device, argp);
5464 case BIODASDSYMMIO:
5465 return dasd_symm_io(device, argp);
5466 default:
5467 return -ENOTTY;
5468 }
5469 }
5470
5471 /*
5472 * Dump the range of CCWs into 'page' buffer
5473 * and return number of printed chars.
5474 */
5475 static int
dasd_eckd_dump_ccw_range(struct ccw1 * from,struct ccw1 * to,char * page)5476 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5477 {
5478 int len, count;
5479 char *datap;
5480
5481 len = 0;
5482 while (from <= to) {
5483 len += sprintf(page + len, PRINTK_HEADER
5484 " CCW %p: %08X %08X DAT:",
5485 from, ((int *) from)[0], ((int *) from)[1]);
5486
5487 /* get pointer to data (consider IDALs) */
5488 if (from->flags & CCW_FLAG_IDA)
5489 datap = (char *) *((addr_t *) (addr_t) from->cda);
5490 else
5491 datap = (char *) ((addr_t) from->cda);
5492
5493 /* dump data (max 32 bytes) */
5494 for (count = 0; count < from->count && count < 32; count++) {
5495 if (count % 8 == 0) len += sprintf(page + len, " ");
5496 if (count % 4 == 0) len += sprintf(page + len, " ");
5497 len += sprintf(page + len, "%02x", datap[count]);
5498 }
5499 len += sprintf(page + len, "\n");
5500 from++;
5501 }
5502 return len;
5503 }
5504
5505 static void
dasd_eckd_dump_sense_dbf(struct dasd_device * device,struct irb * irb,char * reason)5506 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5507 char *reason)
5508 {
5509 u64 *sense;
5510 u64 *stat;
5511
5512 sense = (u64 *) dasd_get_sense(irb);
5513 stat = (u64 *) &irb->scsw;
5514 if (sense) {
5515 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5516 "%016llx %016llx %016llx %016llx",
5517 reason, *stat, *((u32 *) (stat + 1)),
5518 sense[0], sense[1], sense[2], sense[3]);
5519 } else {
5520 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5521 reason, *stat, *((u32 *) (stat + 1)),
5522 "NO VALID SENSE");
5523 }
5524 }
5525
5526 /*
5527 * Print sense data and related channel program.
5528 * Parts are printed because printk buffer is only 1024 bytes.
5529 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		/* concurrent sense available: dump 32 bytes, 8 per line */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		/* 'last' ends where command/data chaining stops */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
			irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2; /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1; /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
5628
5629
5630 /*
5631 * Print sense data from a tcw.
5632 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	/* fetch the transport status block if the TCW/TSB pair is valid */
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		/* NOTE(review): assumes the first 28 bytes of tsb->count
		 * are non-payload - confirm against the TSB layout */
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		/* low three flag bits select the tsa format */
		switch (tsb->flags & 0x07) {
		case 1: /* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			/* dump 32 bytes of sense data, 8 per line */
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
5762
dasd_eckd_dump_sense(struct dasd_device * device,struct dasd_ccw_req * req,struct irb * irb)5763 static void dasd_eckd_dump_sense(struct dasd_device *device,
5764 struct dasd_ccw_req *req, struct irb *irb)
5765 {
5766 u8 *sense = dasd_get_sense(irb);
5767
5768 if (scsw_is_tm(&irb->scsw)) {
5769 /*
5770 * In some cases the 'File Protected' or 'Incorrect Length'
5771 * error might be expected and log messages shouldn't be written
5772 * then. Check if the according suppress bit is set.
5773 */
5774 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5775 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5776 return;
5777 if (scsw_cstat(&irb->scsw) == 0x40 &&
5778 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5779 return;
5780
5781 dasd_eckd_dump_sense_tcw(device, req, irb);
5782 } else {
5783 /*
5784 * In some cases the 'Command Reject' or 'No Record Found'
5785 * error might be expected and log messages shouldn't be
5786 * written then. Check if the according suppress bit is set.
5787 */
5788 if (sense && sense[0] & SNS0_CMD_REJECT &&
5789 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5790 return;
5791
5792 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5793 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5794 return;
5795
5796 dasd_eckd_dump_sense_ccw(device, req, irb);
5797 }
5798 }
5799
/*
 * Re-read the configuration data of a device, regenerate its UID and
 * re-register it with alias management; log when the base unit address
 * changed.  Returns 0 on success, -1 on failure.
 */
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	/* snapshot the current base unit address under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		/* include vduit in the UID string only when present */
		if (strlen(uid.vduit) > 0)
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr, uid.vduit);
		else
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr);

		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
5853
/*
 * Read the storage server message buffer (PSF/RSSD suborder 0x03),
 * preferably on the path given by @lpum; if that fails, retry once
 * with an open path mask (needed on z/VM, see below).
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* two chained CCWs (PSF + RSSD), one data area for both payloads */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	/* (re)initialize the request; re-entered with cqr->lpm = 0 */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		/* copy the message buffer out to the caller */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5935
/*
 * Query Host Access (PSF/RSSD suborder QHA): read the host access
 * information for this LSS/volume from the storage server into @data.
 * Returns 0 on success, -EOPNOTSUPP when the feature is unavailable
 * (or the query failed), -ENOMEM on allocation failure.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	/*
	 * the cqr data area only carries the PSF order; the (large) RSSD
	 * result goes into the separately allocated host_access buffer
	 */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
6017 /*
6018 * return number of grouped devices
6019 */
dasd_eckd_host_access_count(struct dasd_device * device)6020 static int dasd_eckd_host_access_count(struct dasd_device *device)
6021 {
6022 struct dasd_psf_query_host_access *access;
6023 struct dasd_ckd_path_group_entry *entry;
6024 struct dasd_ckd_host_information *info;
6025 int count = 0;
6026 int rc, i;
6027
6028 access = kzalloc(sizeof(*access), GFP_NOIO);
6029 if (!access) {
6030 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6031 "Could not allocate access buffer");
6032 return -ENOMEM;
6033 }
6034 rc = dasd_eckd_query_host_access(device, access);
6035 if (rc) {
6036 kfree(access);
6037 return rc;
6038 }
6039
6040 info = (struct dasd_ckd_host_information *)
6041 access->host_access_information;
6042 for (i = 0; i < info->entry_count; i++) {
6043 entry = (struct dasd_ckd_path_group_entry *)
6044 (info->entry + i * info->entry_size);
6045 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6046 count++;
6047 }
6048
6049 kfree(access);
6050 return count;
6051 }
6052
6053 /*
6054 * write host access information to a sequential file
6055 */
dasd_hosts_print(struct dasd_device * device,struct seq_file * m)6056 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6057 {
6058 struct dasd_psf_query_host_access *access;
6059 struct dasd_ckd_path_group_entry *entry;
6060 struct dasd_ckd_host_information *info;
6061 char sysplex[9] = "";
6062 int rc, i;
6063
6064 access = kzalloc(sizeof(*access), GFP_NOIO);
6065 if (!access) {
6066 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6067 "Could not allocate access buffer");
6068 return -ENOMEM;
6069 }
6070 rc = dasd_eckd_query_host_access(device, access);
6071 if (rc) {
6072 kfree(access);
6073 return rc;
6074 }
6075
6076 info = (struct dasd_ckd_host_information *)
6077 access->host_access_information;
6078 for (i = 0; i < info->entry_count; i++) {
6079 entry = (struct dasd_ckd_path_group_entry *)
6080 (info->entry + i * info->entry_size);
6081 /* PGID */
6082 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6083 /* FLAGS */
6084 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6085 /* SYSPLEX NAME */
6086 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6087 EBCASC(sysplex, sizeof(sysplex));
6088 seq_printf(m, "sysplex_name %8s\n", sysplex);
6089 /* SUPPORTED CYLINDER */
6090 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6091 /* TIMESTAMP */
6092 seq_printf(m, "timestamp %lu\n", (unsigned long)
6093 entry->timestamp);
6094 }
6095 kfree(access);
6096
6097 return 0;
6098 }
6099
/*
 * Perform Subsystem Function - CUIR response
 *
 * Send the response code for a CUIR (control unit initiated
 * reconfiguration) message back to the storage server via a single PSF
 * CCW.  lpum identifies the path the CUIR attention was received on and
 * is translated to the path array position used for cssid/chpid/ssid.
 *
 * Returns the dasd_sleep_on() result: 0 on success, negative errno
 * otherwise.
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	/* identify the responding path to the storage server */
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* paths may just have changed; verify before starting the request */
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
6150
6151 /*
6152 * return configuration data that is referenced by record selector
6153 * if a record selector is specified or per default return the
6154 * conf_data pointer for the path specified by lpum
6155 */
dasd_eckd_get_ref_conf(struct dasd_device * device,__u8 lpum,struct dasd_cuir_message * cuir)6156 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6157 __u8 lpum,
6158 struct dasd_cuir_message *cuir)
6159 {
6160 struct dasd_conf_data *conf_data;
6161 int path, pos;
6162
6163 if (cuir->record_selector == 0)
6164 goto out;
6165 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6166 conf_data = device->path[pos].conf_data;
6167 if (conf_data->gneq.record_selector ==
6168 cuir->record_selector)
6169 return conf_data;
6170 }
6171 out:
6172 return device->path[pathmask_to_pos(lpum)].conf_data;
6173 }
6174
6175 /*
6176 * This function determines the scope of a reconfiguration request by
6177 * analysing the path and device selection data provided in the CUIR request.
6178 * Returns a path mask containing CUIR affected paths for the give device.
6179 *
6180 * If the CUIR request does not contain the required information return the
6181 * path mask of the path the attention message for the CUIR request was reveived
6182 * on.
6183 */
dasd_eckd_cuir_scope(struct dasd_device * device,__u8 lpum,struct dasd_cuir_message * cuir)6184 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6185 struct dasd_cuir_message *cuir)
6186 {
6187 struct dasd_conf_data *ref_conf_data;
6188 unsigned long bitmask = 0, mask = 0;
6189 struct dasd_conf_data *conf_data;
6190 unsigned int pos, path;
6191 char *ref_gneq, *gneq;
6192 char *ref_ned, *ned;
6193 int tbcpm = 0;
6194
6195 /* if CUIR request does not specify the scope use the path
6196 the attention message was presented on */
6197 if (!cuir->ned_map ||
6198 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6199 return lpum;
6200
6201 /* get reference conf data */
6202 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6203 /* reference ned is determined by ned_map field */
6204 pos = 8 - ffs(cuir->ned_map);
6205 ref_ned = (char *)&ref_conf_data->neds[pos];
6206 ref_gneq = (char *)&ref_conf_data->gneq;
6207 /* transfer 24 bit neq_map to mask */
6208 mask = cuir->neq_map[2];
6209 mask |= cuir->neq_map[1] << 8;
6210 mask |= cuir->neq_map[0] << 16;
6211
6212 for (path = 0; path < 8; path++) {
6213 /* initialise data per path */
6214 bitmask = mask;
6215 conf_data = device->path[path].conf_data;
6216 pos = 8 - ffs(cuir->ned_map);
6217 ned = (char *) &conf_data->neds[pos];
6218 /* compare reference ned and per path ned */
6219 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6220 continue;
6221 gneq = (char *)&conf_data->gneq;
6222 /* compare reference gneq and per_path gneq under
6223 24 bit mask where mask bit 0 equals byte 7 of
6224 the gneq and mask bit 24 equals byte 31 */
6225 while (bitmask) {
6226 pos = ffs(bitmask) - 1;
6227 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6228 != 0)
6229 break;
6230 clear_bit(pos, &bitmask);
6231 }
6232 if (bitmask)
6233 continue;
6234 /* device and path match the reference values
6235 add path to CUIR scope */
6236 tbcpm |= 0x80 >> path;
6237 }
6238 return tbcpm;
6239 }
6240
dasd_eckd_cuir_notify_user(struct dasd_device * device,unsigned long paths,int action)6241 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6242 unsigned long paths, int action)
6243 {
6244 int pos;
6245
6246 while (paths) {
6247 /* get position of bit in mask */
6248 pos = 8 - ffs(paths);
6249 /* get channel path descriptor from this position */
6250 if (action == CUIR_QUIESCE)
6251 pr_warn("Service on the storage server caused path %x.%02x to go offline",
6252 device->path[pos].cssid,
6253 device->path[pos].chpid);
6254 else if (action == CUIR_RESUME)
6255 pr_info("Path %x.%02x is back online after service on the storage server",
6256 device->path[pos].cssid,
6257 device->path[pos].chpid);
6258 clear_bit(7 - pos, &paths);
6259 }
6260 }
6261
/*
 * Remove the CUIR-affected paths of one device from its operational
 * path mask and remember them as CUIR quiesced.
 *
 * Returns the removed path mask, 0 if none of the affected paths was in
 * use, or -EINVAL if the action would take away the device's last
 * operational path.
 */
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);

	/* nothing to do if none of the affected paths is in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;

	/* refuse if no operational path would be left afterwards */
	if (!(dasd_path_get_opm(device) & ~tbcpm))
		return -EINVAL;

	/* take the paths out of the operational mask, mark them CUIR quiesced */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}
6281
/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		/* path masks are manipulated under the per-device ccwdev lock */
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	/* negative tbcpm comes from the device that would lose its last path */
	return tbcpm;
}
6350
/*
 * Resume handling of a CUIR message: walk all devices of the LCU
 * (active, inactive, and PAV grouped) and trigger path verification for
 * every CUIR-scoped path that is not already operational.  Always
 * returns 0 so the CUIR response reports completion.
 */
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}
6413
/*
 * Handle a CUIR attention message: perform the requested quiesce or
 * resume action, answer the storage server with the resulting response
 * code, and re-arm attention checking for the path.
 */
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	switch (cuir->code) {
	case CUIR_QUIESCE:
		/* quiesce: fails if a device would lose its last path */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
		break;
	case CUIR_RESUME:
		/* resume always completes */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
		break;
	default:
		response = PSF_CUIR_NOT_SUPPORTED;
		break;
	}

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
6446
/*
 * Resume all devices of this LCU that were stopped because the extent
 * pool ran out of space (DASD_STOPPED_NOSPC).  Walks the active,
 * inactive, and PAV-grouped device lists under the lcu lock.
 */
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	/* lcu lock protects the device lists against concurrent changes */
	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}
6482
/*
 * Handle an out-of-space (OOS) attention message from the storage
 * server: log the reported extent pool condition, resume space-stopped
 * devices when usable space is (still) available, refresh the cached
 * extent pool information, and re-arm attention checking for the path.
 */
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		/* warning level reached, but space is still available */
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
6513
/*
 * Worker function: read the attention message buffer for the path the
 * attention was raised on and dispatch CUIR and out-of-space (OOS)
 * messages to their handlers.  Always drops the device reference taken
 * by dasd_eckd_check_attention() and frees the work data.
 */
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	/* dispatch on message length and format: CUIR or OOS */
	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	/* release the reference taken when the work was scheduled */
	dasd_put_device(device);
	/* kfree(NULL) is a no-op, safe on the early-exit path */
	kfree(messages);
	kfree(data);
}
6545
/*
 * Schedule asynchronous processing of an attention interrupt on the
 * given path.  Called from interrupt context, hence GFP_ATOMIC.
 * Returns 0 on success or -ENOMEM if the work item can't be allocated.
 */
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *work_data;

	work_data = kzalloc(sizeof(*work_data), GFP_ATOMIC);
	if (!work_data)
		return -ENOMEM;

	/* hold a device reference until the worker has run */
	dasd_get_device(device);
	work_data->device = device;
	work_data->lpum = lpum;
	INIT_WORK(&work_data->worker, dasd_eckd_check_attention_work);
	schedule_work(&work_data->worker);
	return 0;
}
6560
/*
 * Take the given path out of the operational path mask because it lost
 * HPF functionality, but only if another operational path remains.
 * Returns 1 if the path was disabled, 0 if it was the last one.
 */
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	/* keep the path if no other operational path would remain */
	if (!(~lpum & dasd_path_get_opm(device)))
		return 0;

	dasd_path_add_nohpfpm(device, lpum);
	dasd_path_remove_opm(device, lpum);
	dev_err(&device->cdev->dev,
		"Channel path %02X lost HPF functionality and is disabled\n",
		lpum);
	return 1;
}
6573
dasd_eckd_disable_hpf_device(struct dasd_device * device)6574 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6575 {
6576 struct dasd_eckd_private *private = device->private;
6577
6578 dev_err(&device->cdev->dev,
6579 "High Performance FICON disabled\n");
6580 private->fcx_max_data = 0;
6581 }
6582
dasd_eckd_hpf_enabled(struct dasd_device * device)6583 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6584 {
6585 struct dasd_eckd_private *private = device->private;
6586
6587 return private->fcx_max_data ? 1 : 0;
6588 }
6589
/*
 * Handle an HPF (High Performance FICON) error reported in the irb.
 * Depending on the sense qualifier either the whole device or only the
 * failing path loses HPF.  Afterwards new I/O is stopped and existing
 * requests are requeued so they get rebuilt without HPF.
 */
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		/* the device as a whole can no longer do HPF */
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		/* single path lost HPF; if it could be disabled we are done */
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		/* last path: disable HPF device-wide and re-verify old HPF paths */
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				  dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O ist started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
6617
/*
 * Initialize block layer request queue.
 *
 * Derives the request size limits from the device's block size shift
 * and whether raw-track access is enabled, and constrains segments to
 * page size so each segment maps to a single idaw/tidaw.
 */
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	struct dasd_device *device = block->base;
	int max;

	if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track
		 * so the max_hw_sectors are
		 * 2048 x 512B = 1024kB = 16 tracks
		 */
		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	} else {
		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
	}
	/* DASDs are not rotational disks from the scheduler's perspective */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* With page sized segments each segment can be translated into one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
6649
/*
 * CCW bus driver glue: binds the device types listed in dasd_eckd_ids
 * to the ECKD probe/online routines and the generic DASD callbacks.
 */
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
6667
/*
 * The ECKD discipline: the set of operations the DASD core uses to
 * drive ECKD-formatted devices - device setup and analysis, channel
 * program build/free, error recovery, ioctls, host access queries,
 * HPF handling, and extent-pool/ESE (thin provisioning) support.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};
6719
6720 static int __init
dasd_eckd_init(void)6721 dasd_eckd_init(void)
6722 {
6723 int ret;
6724
6725 ASCEBC(dasd_eckd_discipline.ebcname, 4);
6726 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6727 GFP_KERNEL | GFP_DMA);
6728 if (!dasd_reserve_req)
6729 return -ENOMEM;
6730 dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6731 GFP_KERNEL | GFP_DMA);
6732 if (!dasd_vol_info_req) {
6733 kfree(dasd_reserve_req);
6734 return -ENOMEM;
6735 }
6736 pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6737 GFP_KERNEL | GFP_DMA);
6738 if (!pe_handler_worker) {
6739 kfree(dasd_reserve_req);
6740 kfree(dasd_vol_info_req);
6741 return -ENOMEM;
6742 }
6743 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6744 if (!rawpadpage) {
6745 kfree(pe_handler_worker);
6746 kfree(dasd_reserve_req);
6747 kfree(dasd_vol_info_req);
6748 return -ENOMEM;
6749 }
6750 ret = ccw_driver_register(&dasd_eckd_driver);
6751 if (!ret)
6752 wait_for_device_probe();
6753 else {
6754 kfree(pe_handler_worker);
6755 kfree(dasd_reserve_req);
6756 kfree(dasd_vol_info_req);
6757 free_page((unsigned long)rawpadpage);
6758 }
6759 return ret;
6760 }
6761
6762 static void __exit
dasd_eckd_cleanup(void)6763 dasd_eckd_cleanup(void)
6764 {
6765 ccw_driver_unregister(&dasd_eckd_driver);
6766 kfree(pe_handler_worker);
6767 kfree(dasd_reserve_req);
6768 free_page((unsigned long)rawpadpage);
6769 }
6770
/* register the ECKD discipline on module load, tear it down on unload */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
6773