/*
 * ECKD discipline for the s390 DASD device driver.
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif
#define PRINTK_HEADER "dasd(eckd):"

#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

/*
 * Raw-track access maps each track to 64 KiB in memory,
 * i.e. 16 blocks of 4 KiB per track.
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64 KiB are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* control unit / device type combinations handled by the ECKD discipline */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* statically allocated emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
/* data and worker used for deferred path verification */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
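/*
 * Probe function: enable forced online/offline handling, path grouping
 * and multipath support on the ccw device, then register it with the
 * DASD core for the ECKD discipline.
 */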
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

/* record sizes of the first three records on track 0 (compatible disk layout) */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record numbers of the count areas read by the analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 2 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
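/*
 * Helpers for ECKD track geometry.  recs_per_track() evaluates the
 * device type specific track capacity formulas; a 3390 track, for
 * example, holds 12 records of 4096 bytes when no key is used.
 */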
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
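/*
 * Pack a cylinder/head address: cylinder numbers above 0xFFFF do not fit
 * into the 16-bit cyl field, so the upper cylinder bits are stored in the
 * high bits of the head field while the head itself only uses the low
 * four bits.
 */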
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

static int
check_XRC (struct ccw1 *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -EOPNOTSUPP || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}
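/*
 * Set up a Define Extent CCW: choose the permitted operations and the
 * caching mode for the given command and describe the extent reaching
 * from track 'trk' to track 'totrk'.
 */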
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;
	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -EOPNOTSUPP || rc == -EACCES)
		rc = 0;
	return rc;
}
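/*
 * Fill a Locate Record Extended payload.  Note that the meaning of the
 * 'count' field depends on the operation: for record based I/O it is the
 * number of records, for track based I/O the number of tracks.
 */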
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
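/*
 * Build a Prefix CCW that carries Define Extent and, for format 1, also
 * Locate Record Extended data.  'basedev' describes the physical volume,
 * while the request itself may be started on an alias device 'startdev'.
 */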
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extent data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		dedata->mask.perm = 0x03;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}
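/* Convenience wrapper: a Prefix CCW with Define Extent data only (format 0) */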
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
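/*
 * Compatible Disk Layout (CDL) helpers: the first records of the first
 * two tracks use special key and data lengths and have to be accessed
 * with the KD variants of the read/write commands.
 */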
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
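/*
 * Generate the device unique id that identifies the physical device; it
 * is derived from the node and general node element qualifier records of
 * the configuration data and protected by the ccw device lock.
 */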
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	if (device->private) {
		private = (struct dasd_eckd_private *)device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare the device UID with the UID derived from the given
 * dasd_eckd_private structure; return 0 for a match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
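/*
 * Set up a Read Configuration Data request for the given path mask.  The
 * buffer is initialized with EBCDIC "V1.0" to indicate support for
 * virtual device SNEQ records.
 */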
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf: if the cqr is not done, the buffer is
 * re-initialized with the EBCDIC "V1.0" signature before the waiter is
 * woken up.
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data,
	 * since some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data,
	 * since some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* rcd_buf is used as data area */
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
};

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc *chp_desc;
	struct subchannel_id sch_id;

	private = (struct dasd_eckd_private *) device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per-path configuration data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID so that the other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per-path configuration data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
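/*
 * Determine the maximum data size that transport mode (zHPF) requests may
 * transfer: both the channel subsystem and the storage server have to
 * support FCX, and the limit is derived from the maximum data count
 * reported by the CIO layer.
 */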
static u32 get_fcx_max_data(struct dasd_device *device)
{
#if defined(CONFIG_64BIT)
	struct dasd_eckd_private *private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	int tpm, mdc;

	if (dasd_nofcx)
		return 0;

	private = (struct dasd_eckd_private *) device->private;

	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
#else
	return 0;
#endif
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private;
	int mdc;
	u32 fcx_max_data;

	private = (struct dasd_eckd_private *) device->private;
	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc < 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc;

	rc = -ENODEV;
	private = (struct dasd_eckd_private *) device->private;

	/* re-read the configuration data on any operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
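/*
 * Worker for deferred path verification: read the configuration data on
 * every path that is to be verified, check the zHPF limits and compare
 * the path UID with the device UID before accepting the path as
 * operational.
 */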
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison; rebuild_device_uid may
		 * change the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful; in a hyper PAV
			 * setup the UID may have changed on the storage
			 * server, so rebuild the device UID and compare again
			 * before treating the path as miscabled
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * the path verification above and the following modification
		 * of the device opm mask.  That race could be avoided with
		 * yet another path mask, but it is handled in dasd_start_IO
		 * instead.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
{
	struct path_verification_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_path_verification_mutex)) {
			data = path_verification_worker;
			data->isglobal = 1;
		} else
			return -ENOMEM;
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_path_verification_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
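/*
 * Read the feature codes of the storage server with a PSF/RSSD command
 * chain and cache them in the private structure.
 */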
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* feature codes */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
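/*
 * Build the channel program for a Perform Subsystem Function - SSC
 * request, optionally asking the storage server to enable PAV.
 */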
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Perform Subsystem Function - SSC.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking;
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate the storage server of the current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	private = (struct dasd_eckd_private *) device->private;
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}

/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* retry later */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
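/*
 * Check device characteristics.
 * If the device is accessible using the ECKD discipline, the device is
 * enabled.
 */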
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server */
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;
	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept bogus values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		if ((__u8 *)device->path[i].conf_data ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
		}
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
	kfree(private->conf_data);
	private->conf_data = NULL;
}
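/*
 * Build the initial analysis request: read the count areas of the first
 * records on tracks 0 and 2 so that the disk layout and the block size
 * can be detected.
 */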
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;
	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}
/*
 * This is the callback function for the init_analysis cqr.  It saves the
 * status of the initial analysis ccw before it frees it and kicks the
 * device to continue the startup sequence, which will call
 * dasd_eckd_do_analysis again (if the device has not been marked for
 * deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/*
	 * first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = (private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
2109
2110static int dasd_eckd_do_analysis(struct dasd_block *block)
2111{
2112 struct dasd_eckd_private *private;
2113
2114 private = (struct dasd_eckd_private *) block->base->private;
2115 if (private->init_cqr_status < 0)
2116 return dasd_eckd_start_analysis(block);
2117 else
2118 return dasd_eckd_end_analysis(block);
2119}
2120
2121static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2122{
2123 return dasd_alias_add_device(device);
}
2125
2126static int dasd_eckd_online_to_ready(struct dasd_device *device)
2127{
2128 cancel_work_sync(&device->reload_device);
2129 cancel_work_sync(&device->kick_validate);
2130 return 0;
}
2132
2133static int dasd_eckd_basic_to_known(struct dasd_device *device)
2134{
2135 return dasd_alias_remove_device(device);
}
2137
2138static int
2139dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2140{
2141 struct dasd_eckd_private *private;
2142
2143 private = (struct dasd_eckd_private *) block->base->private;
2144 if (dasd_check_blocksize(block->bp_block) == 0) {
2145 geo->sectors = recs_per_track(&private->rdc_data,
2146 0, block->bp_block);
2147 }
2148 geo->cylinders = private->rdc_data.no_cyl;
2149 geo->heads = private->rdc_data.trk_per_cyl;
2150 return 0;
2151}
2152
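/*
 * Build the TCW request for the format check
 */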
2156static struct dasd_ccw_req *
2157dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2158 int enable_pav, struct eckd_count *fmt_buffer,
2159 int rpt)
2160{
2161 struct dasd_eckd_private *start_priv;
2162 struct dasd_device *startdev = NULL;
2163 struct tidaw *last_tidaw = NULL;
2164 struct dasd_ccw_req *cqr;
2165 struct itcw *itcw;
2166 int itcw_size;
2167 int count;
2168 int rc;
2169 int i;
2170
2171 if (enable_pav)
2172 startdev = dasd_alias_get_start_dev(base);
2173
2174 if (!startdev)
2175 startdev = base;
2176
2177 start_priv = startdev->private;
2178
2179 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2180
2181
2182
2183
2184
2185 itcw_size = itcw_calc_size(0, count, 0);
2186
2187 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2188 if (IS_ERR(cqr))
2189 return cqr;
2190
2191 start_priv->count++;
2192
2193 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2194 if (IS_ERR(itcw)) {
2195 rc = -EINVAL;
2196 goto out_err;
2197 }
2198
2199 cqr->cpaddr = itcw_get_tcw(itcw);
2200 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2201 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2202 sizeof(struct eckd_count),
2203 count * sizeof(struct eckd_count), 0, rpt);
2204 if (rc)
2205 goto out_err;
2206
2207 for (i = 0; i < count; i++) {
2208 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2209 sizeof(struct eckd_count));
2210 if (IS_ERR(last_tidaw)) {
2211 rc = -EINVAL;
2212 goto out_err;
2213 }
2214 }
2215
2216 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2217 itcw_finalize(itcw);
2218
2219 cqr->cpmode = 1;
2220 cqr->startdev = startdev;
2221 cqr->memdev = startdev;
2222 cqr->basedev = base;
2223 cqr->retries = 255;
2224 cqr->expires = startdev->default_expires * HZ;
2225 cqr->buildclk = get_tod_clock();
2226 cqr->status = DASD_CQR_FILLED;
2227
2228 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2229 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2230
2231 return cqr;
2232
2233out_err:
2234 dasd_sfree_request(cqr, startdev);
2235
2236 return ERR_PTR(rc);
2237}
2238
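/*
 * Build the CCW request for the format check
 */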
2242static struct dasd_ccw_req *
2243dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2244 int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2245{
2246 struct dasd_eckd_private *start_priv;
2247 struct dasd_eckd_private *base_priv;
2248 struct dasd_device *startdev = NULL;
2249 struct dasd_ccw_req *cqr;
2250 struct ccw1 *ccw;
2251 void *data;
2252 int cplength, datasize;
2253 int use_prefix;
2254 int count;
2255 int i;
2256
2257 if (enable_pav)
2258 startdev = dasd_alias_get_start_dev(base);
2259
2260 if (!startdev)
2261 startdev = base;
2262
2263 start_priv = startdev->private;
2264 base_priv = base->private;
2265
2266 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2267
2268 use_prefix = base_priv->features.feature[8] & 0x01;
2269
2270 if (use_prefix) {
2271 cplength = 1;
2272 datasize = sizeof(struct PFX_eckd_data);
2273 } else {
2274 cplength = 2;
2275 datasize = sizeof(struct DE_eckd_data) +
2276 sizeof(struct LO_eckd_data);
2277 }
2278 cplength += count;
2279
2280 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2281 startdev);
2282 if (IS_ERR(cqr))
2283 return cqr;
2284
2285 start_priv->count++;
2286 data = cqr->data;
2287 ccw = cqr->cpaddr;
2288
2289 if (use_prefix) {
2290 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2291 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2292 count, 0, 0);
2293 } else {
2294 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2295 DASD_ECKD_CCW_READ_COUNT, startdev);
2296
2297 data += sizeof(struct DE_eckd_data);
2298 ccw[-1].flags |= CCW_FLAG_CC;
2299
2300 locate_record(ccw++, data, fdata->start_unit, 0, count,
2301 DASD_ECKD_CCW_READ_COUNT, base, 0);
2302 }
2303
2304 for (i = 0; i < count; i++) {
2305 ccw[-1].flags |= CCW_FLAG_CC;
2306 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2307 ccw->flags = CCW_FLAG_SLI;
2308 ccw->count = 8;
2309 ccw->cda = (__u32)(addr_t) fmt_buffer;
2310 ccw++;
2311 fmt_buffer++;
2312 }
2313
2314 cqr->startdev = startdev;
2315 cqr->memdev = startdev;
2316 cqr->basedev = base;
2317 cqr->retries = 255;
2318 cqr->expires = startdev->default_expires * HZ;
2319 cqr->buildclk = get_tod_clock();
2320 cqr->status = DASD_CQR_FILLED;
2321
2322 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2323
2324 return cqr;
2325}
2326
2327static struct dasd_ccw_req *
2328dasd_eckd_build_format(struct dasd_device *base,
2329 struct format_data_t *fdata,
2330 int enable_PAV)
2331{
2332 struct dasd_eckd_private *base_priv;
2333 struct dasd_eckd_private *start_priv;
2334 struct dasd_device *startdev = NULL;
2335 struct dasd_ccw_req *fcp;
2336 struct eckd_count *ect;
2337 struct ch_t address;
2338 struct ccw1 *ccw;
2339 void *data;
2340 int rpt;
2341 int cplength, datasize;
2342 int i, j;
2343 int intensity = 0;
2344 int r0_perm;
2345 int nr_tracks;
2346 int use_prefix;
2347
2348 if (enable_PAV)
2349 startdev = dasd_alias_get_start_dev(base);
2350
2351 if (!startdev)
2352 startdev = base;
2353
2354 start_priv = (struct dasd_eckd_private *) startdev->private;
2355 base_priv = (struct dasd_eckd_private *) base->private;
2356
2357 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2358
2359 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2360
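	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */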
2370 if (fdata->intensity & 0x10) {
2371 r0_perm = 0;
2372 intensity = fdata->intensity & ~0x10;
2373 } else {
2374 r0_perm = 1;
2375 intensity = fdata->intensity;
2376 }
2377
2378 use_prefix = base_priv->features.feature[8] & 0x01;
2379
2380 switch (intensity) {
2381 case 0x00:
2382 case 0x08:
2383 cplength = 2 + (rpt*nr_tracks);
2384 if (use_prefix)
2385 datasize = sizeof(struct PFX_eckd_data) +
2386 sizeof(struct LO_eckd_data) +
2387 rpt * nr_tracks * sizeof(struct eckd_count);
2388 else
2389 datasize = sizeof(struct DE_eckd_data) +
2390 sizeof(struct LO_eckd_data) +
2391 rpt * nr_tracks * sizeof(struct eckd_count);
2392 break;
2393 case 0x01:
2394 case 0x09:
2395 cplength = 2 + rpt * nr_tracks;
2396 if (use_prefix)
2397 datasize = sizeof(struct PFX_eckd_data) +
2398 sizeof(struct LO_eckd_data) +
2399 sizeof(struct eckd_count) +
2400 rpt * nr_tracks * sizeof(struct eckd_count);
2401 else
2402 datasize = sizeof(struct DE_eckd_data) +
2403 sizeof(struct LO_eckd_data) +
2404 sizeof(struct eckd_count) +
2405 rpt * nr_tracks * sizeof(struct eckd_count);
2406 break;
2407 case 0x04:
2408 case 0x0c:
2409 cplength = 3;
2410 if (use_prefix)
2411 datasize = sizeof(struct PFX_eckd_data) +
2412 sizeof(struct LO_eckd_data) +
2413 sizeof(struct eckd_count);
2414 else
2415 datasize = sizeof(struct DE_eckd_data) +
2416 sizeof(struct LO_eckd_data) +
2417 sizeof(struct eckd_count);
2418 break;
2419 default:
2420 dev_warn(&startdev->cdev->dev,
2421 "An I/O control call used incorrect flags 0x%x\n",
2422 fdata->intensity);
2423 return ERR_PTR(-EINVAL);
2424 }
2425
2426 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
2427 datasize, startdev);
2428 if (IS_ERR(fcp))
2429 return fcp;
2430
2431 start_priv->count++;
2432 data = fcp->data;
2433 ccw = fcp->cpaddr;
2434
2435 switch (intensity & ~0x08) {
2436 case 0x00:
2437 if (use_prefix) {
2438 prefix(ccw++, (struct PFX_eckd_data *) data,
2439 fdata->start_unit, fdata->stop_unit,
2440 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2441
2442 if (r0_perm)
2443 ((struct PFX_eckd_data *)data)
2444 ->define_extent.ga_extended |= 0x04;
2445 data += sizeof(struct PFX_eckd_data);
2446 } else {
2447 define_extent(ccw++, (struct DE_eckd_data *) data,
2448 fdata->start_unit, fdata->stop_unit,
2449 DASD_ECKD_CCW_WRITE_CKD, startdev);
2450
2451 if (r0_perm)
2452 ((struct DE_eckd_data *) data)
2453 ->ga_extended |= 0x04;
2454 data += sizeof(struct DE_eckd_data);
2455 }
2456 ccw[-1].flags |= CCW_FLAG_CC;
2457 locate_record(ccw++, (struct LO_eckd_data *) data,
2458 fdata->start_unit, 0, rpt*nr_tracks,
2459 DASD_ECKD_CCW_WRITE_CKD, base,
2460 fdata->blksize);
2461 data += sizeof(struct LO_eckd_data);
2462 break;
2463 case 0x01:
2464 if (use_prefix) {
2465 prefix(ccw++, (struct PFX_eckd_data *) data,
2466 fdata->start_unit, fdata->stop_unit,
2467 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2468 base, startdev);
2469 data += sizeof(struct PFX_eckd_data);
2470 } else {
2471 define_extent(ccw++, (struct DE_eckd_data *) data,
2472 fdata->start_unit, fdata->stop_unit,
2473 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
2474 data += sizeof(struct DE_eckd_data);
2475 }
2476 ccw[-1].flags |= CCW_FLAG_CC;
2477 locate_record(ccw++, (struct LO_eckd_data *) data,
2478 fdata->start_unit, 0, rpt * nr_tracks + 1,
2479 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2480 base->block->bp_block);
2481 data += sizeof(struct LO_eckd_data);
2482 break;
2483 case 0x04:
2484 if (use_prefix) {
2485 prefix(ccw++, (struct PFX_eckd_data *) data,
2486 fdata->start_unit, fdata->stop_unit,
2487 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2488 data += sizeof(struct PFX_eckd_data);
2489 } else {
2490 define_extent(ccw++, (struct DE_eckd_data *) data,
2491 fdata->start_unit, fdata->stop_unit,
2492 DASD_ECKD_CCW_WRITE_CKD, startdev);
2493 data += sizeof(struct DE_eckd_data);
2494 }
2495 ccw[-1].flags |= CCW_FLAG_CC;
2496 locate_record(ccw++, (struct LO_eckd_data *) data,
2497 fdata->start_unit, 0, 1,
2498 DASD_ECKD_CCW_WRITE_CKD, base, 8);
2499 data += sizeof(struct LO_eckd_data);
2500 break;
2501 }
2502
2503 for (j = 0; j < nr_tracks; j++) {
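		/* calculate cylinder and head for the current track */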
2505 set_ch_t(&address,
2506 (fdata->start_unit + j) /
2507 base_priv->rdc_data.trk_per_cyl,
2508 (fdata->start_unit + j) %
2509 base_priv->rdc_data.trk_per_cyl);
2510 if (intensity & 0x01) {
2511 ect = (struct eckd_count *) data;
2512 data += sizeof(struct eckd_count);
2513 ect->cyl = address.cyl;
2514 ect->head = address.head;
2515 ect->record = 0;
2516 ect->kl = 0;
2517 ect->dl = 8;
2518 ccw[-1].flags |= CCW_FLAG_CC;
2519 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2520 ccw->flags = CCW_FLAG_SLI;
2521 ccw->count = 8;
2522 ccw->cda = (__u32)(addr_t) ect;
2523 ccw++;
2524 }
2525 if ((intensity & ~0x08) & 0x04) {
2526 ect = (struct eckd_count *) data;
2527 data += sizeof(struct eckd_count);
2528 ect->cyl = address.cyl;
2529 ect->head = address.head;
2530 ect->record = 1;
2531 ect->kl = 0;
2532 ect->dl = 0;
2533 ccw[-1].flags |= CCW_FLAG_CC;
2534 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2535 ccw->flags = CCW_FLAG_SLI;
2536 ccw->count = 8;
2537 ccw->cda = (__u32)(addr_t) ect;
2538 } else {
2539 for (i = 0; i < rpt; i++) {
2540 ect = (struct eckd_count *) data;
2541 data += sizeof(struct eckd_count);
2542 ect->cyl = address.cyl;
2543 ect->head = address.head;
2544 ect->record = i + 1;
2545 ect->kl = 0;
2546 ect->dl = fdata->blksize;
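				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */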
2551 if ((intensity & 0x08) &&
2552 address.cyl == 0 && address.head == 0) {
2553 if (i < 3) {
2554 ect->kl = 4;
2555 ect->dl = sizes_trk0[i] - 4;
2556 }
2557 }
2558 if ((intensity & 0x08) &&
2559 address.cyl == 0 && address.head == 1) {
2560 ect->kl = 44;
2561 ect->dl = LABEL_SIZE - 44;
2562 }
2563 ccw[-1].flags |= CCW_FLAG_CC;
2564 if (i != 0 || j == 0)
2565 ccw->cmd_code =
2566 DASD_ECKD_CCW_WRITE_CKD;
2567 else
2568 ccw->cmd_code =
2569 DASD_ECKD_CCW_WRITE_CKD_MT;
2570 ccw->flags = CCW_FLAG_SLI;
2571 ccw->count = 8;
2572 ccw->cda = (__u32)(addr_t) ect;
2573 ccw++;
2574 }
2575 }
2576 }
2577
2578 fcp->startdev = startdev;
2579 fcp->memdev = startdev;
2580 fcp->basedev = base;
2581 fcp->retries = 256;
2582 fcp->expires = startdev->default_expires * HZ;
2583 fcp->buildclk = get_tod_clock();
2584 fcp->status = DASD_CQR_FILLED;
2585
2586 return fcp;
2587}
2588
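/*
 * Wrapper function to build a CCW request depending on input data
 */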
2592static struct dasd_ccw_req *
2593dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2594 struct format_data_t *fdata, int enable_pav,
2595 int tpm, struct eckd_count *fmt_buffer, int rpt)
2596{
2597 struct dasd_ccw_req *ccw_req;
2598
2599 if (!fmt_buffer) {
2600 ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
2601 } else {
2602 if (tpm)
2603 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2604 enable_pav,
2605 fmt_buffer, rpt);
2606 else
2607 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2608 fmt_buffer, rpt);
2609 }
2610
2611 return ccw_req;
2612}
2613
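/*
 * Sanity checks on format_data
 */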
2617static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2618 struct format_data_t *fdata)
2619{
2620 struct dasd_eckd_private *private;
2621
2622 private = (struct dasd_eckd_private *) base->private;
2623
2624 if (fdata->start_unit >=
2625 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2626 dev_warn(&base->cdev->dev,
2627 "Start track number %u used in formatting is too big\n",
2628 fdata->start_unit);
2629 return -EINVAL;
2630 }
2631 if (fdata->stop_unit >=
2632 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2633 dev_warn(&base->cdev->dev,
2634 "Stop track number %u used in formatting is too big\n",
2635 fdata->stop_unit);
2636 return -EINVAL;
2637 }
2638 if (fdata->start_unit > fdata->stop_unit) {
2639 dev_warn(&base->cdev->dev,
2640 "Start track %u used in formatting exceeds end track\n",
2641 fdata->start_unit);
2642 return -EINVAL;
2643 }
2644 if (dasd_check_blocksize(fdata->blksize) != 0) {
2645 dev_warn(&base->cdev->dev,
2646 "The DASD cannot be formatted with block size %u\n",
2647 fdata->blksize);
2648 return -EINVAL;
2649 }
2650 return 0;
2651}
2652
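/*
 * This function will process format_data originally coming from an IOCTL
 */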
2656static int dasd_eckd_format_process_data(struct dasd_device *base,
2657 struct format_data_t *fdata,
2658 int enable_pav, int tpm,
2659 struct eckd_count *fmt_buffer, int rpt,
2660 struct irb *irb)
2661{
2662 struct dasd_eckd_private *private = base->private;
2663 struct dasd_ccw_req *cqr, *n;
2664 struct list_head format_queue;
2665 struct dasd_device *device;
2666 char *sense = NULL;
2667 int old_start, old_stop, format_step;
2668 int step, retry;
2669 int rc;
2670
2671 rc = dasd_eckd_format_sanity_checks(base, fdata);
2672 if (rc)
2673 return rc;
2674
2675 INIT_LIST_HEAD(&format_queue);
2676
2677 old_start = fdata->start_unit;
2678 old_stop = fdata->stop_unit;
2679
2680 if (!tpm && fmt_buffer != NULL) {
2681
2682 format_step = 1;
2683 } else if (tpm && fmt_buffer != NULL) {
2684
2685 format_step = DASD_CQR_MAX_CCW / rpt;
2686 } else {
2687
2688 format_step = DASD_CQR_MAX_CCW /
2689 recs_per_track(&private->rdc_data, 0, fdata->blksize);
2690 }
2691
2692 do {
2693 retry = 0;
2694 while (fdata->start_unit <= old_stop) {
2695 step = fdata->stop_unit - fdata->start_unit + 1;
2696 if (step > format_step) {
2697 fdata->stop_unit =
2698 fdata->start_unit + format_step - 1;
2699 }
2700
2701 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
2702 enable_pav, tpm,
2703 fmt_buffer, rpt);
2704 if (IS_ERR(cqr)) {
2705 rc = PTR_ERR(cqr);
2706 if (rc == -ENOMEM) {
2707 if (list_empty(&format_queue))
2708 goto out;
2709
2710
2711
2712
2713
2714 retry = 1;
2715 break;
2716 }
2717 goto out_err;
2718 }
2719 list_add_tail(&cqr->blocklist, &format_queue);
2720
2721 if (fmt_buffer) {
2722 step = fdata->stop_unit - fdata->start_unit + 1;
2723 fmt_buffer += rpt * step;
2724 }
2725 fdata->start_unit = fdata->stop_unit + 1;
2726 fdata->stop_unit = old_stop;
2727 }
2728
2729 rc = dasd_sleep_on_queue(&format_queue);
2730
2731out_err:
2732 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2733 device = cqr->startdev;
2734 private = device->private;
2735
2736 if (cqr->status == DASD_CQR_FAILED) {
2737
2738
2739
2740
2741 if (fmt_buffer && irb) {
2742 sense = dasd_get_sense(&cqr->irb);
2743 memcpy(irb, &cqr->irb, sizeof(*irb));
2744 }
2745 rc = -EIO;
2746 }
2747 list_del_init(&cqr->blocklist);
2748 dasd_sfree_request(cqr, device);
2749 private->count--;
2750 }
2751
2752 if (rc && rc != -EIO)
2753 goto out;
2754 if (rc == -EIO) {
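			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). Those particular cases should
			 * not be passed to the caller as -EIO right away;
			 * instead, retry the request.
			 */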
2763 if (sense &&
2764 (sense[1] & SNS1_NO_REC_FOUND ||
2765 sense[1] & SNS1_FILE_PROTECTED))
2766 retry = 1;
2767 else
2768 goto out;
2769 }
2770
2771 } while (retry);
2772
2773out:
2774 fdata->start_unit = old_start;
2775 fdata->stop_unit = old_stop;
2776
2777 return rc;
2778}
2779
2780static int dasd_eckd_format_device(struct dasd_device *base,
2781 struct format_data_t *fdata, int enable_pav)
2782{
2783 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
2784 0, NULL);
2785}
2786
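/*
 * Helper function to count consecutive records of a single track.
 */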
2790static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
2791 int max)
2792{
2793 int head;
2794 int i;
2795
2796 head = fmt_buffer[start].head;
2797
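	/*
	 * There are 3 conditions where we stop counting:
	 * - if data reoccurs (same head and record may reoccur), which may
	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
	 * - when the head changes, because we're iterating over several
	 *   tracks then (tpm=1)
	 * - when we've reached the end of sensible data in the buffer
	 *   (the record will be 0 then)
	 */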
2807 for (i = start; i < max; i++) {
2808 if (i > start) {
2809 if ((fmt_buffer[i].head == head &&
2810 fmt_buffer[i].record == 1) ||
2811 fmt_buffer[i].head != head ||
2812 fmt_buffer[i].record == 0)
2813 break;
2814 }
2815 }
2816
2817 return i - start;
2818}
2819
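/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with the expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information about the track and the record where the mismatch
 * was found.
 */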
2827static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
2828 struct format_check_t *cdata,
2829 int rpt_max, int rpt_exp,
2830 int trk_per_cyl, int tpm)
2831{
2832 struct ch_t geo;
2833 int max_entries;
2834 int count = 0;
2835 int trkcount;
2836 int blksize;
2837 int pos = 0;
2838 int i, j;
2839 int kl;
2840
2841 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
2842 max_entries = trkcount * rpt_max;
2843
2844 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
2845
2846 if (tpm) {
2847 while (fmt_buffer[pos].record == 0 &&
2848 fmt_buffer[pos].dl == 0) {
2849 if (pos++ > max_entries)
2850 break;
2851 }
2852 } else {
2853 if (i != cdata->expect.start_unit)
2854 pos += rpt_max - count;
2855 }
2856
2857
2858 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
2859
2860
2861 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
2862
2863 if (count < rpt_exp) {
2864 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
2865 break;
2866 }
2867 if (count > rpt_exp) {
2868 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
2869 break;
2870 }
2871
2872 for (j = 0; j < count; j++, pos++) {
2873 blksize = cdata->expect.blksize;
2874 kl = 0;
2875
2876
2877
2878
2879
2880 if ((cdata->expect.intensity & 0x08) &&
2881 geo.cyl == 0 && geo.head == 0) {
2882 if (j < 3) {
2883 blksize = sizes_trk0[j] - 4;
2884 kl = 4;
2885 }
2886 }
2887 if ((cdata->expect.intensity & 0x08) &&
2888 geo.cyl == 0 && geo.head == 1) {
2889 blksize = LABEL_SIZE - 44;
2890 kl = 44;
2891 }
2892
2893
2894 if (fmt_buffer[pos].dl != blksize) {
2895 cdata->result = DASD_FMT_ERR_BLKSIZE;
2896 goto out;
2897 }
2898
2899 if (fmt_buffer[pos].kl != kl) {
2900 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
2901 goto out;
2902 }
2903
2904 if (fmt_buffer[pos].cyl != geo.cyl ||
2905 fmt_buffer[pos].head != geo.head ||
2906 fmt_buffer[pos].record != (j + 1)) {
2907 cdata->result = DASD_FMT_ERR_RECORD_ID;
2908 goto out;
2909 }
2910 }
2911 }
2912
2913out:
2914
2915
2916
2917
2918 if (!cdata->result) {
2919 i--;
2920 pos--;
2921 }
2922
2923 cdata->unit = i;
2924 cdata->num_records = count;
2925 cdata->rec = fmt_buffer[pos].record;
2926 cdata->blksize = fmt_buffer[pos].dl;
2927 cdata->key_length = fmt_buffer[pos].kl;
2928}
2929
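/*
 * Check the format of a range of tracks of a DASD.
 */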
2933static int dasd_eckd_check_device_format(struct dasd_device *base,
2934 struct format_check_t *cdata,
2935 int enable_pav)
2936{
2937 struct dasd_eckd_private *private = base->private;
2938 struct eckd_count *fmt_buffer;
2939 struct irb irb;
2940 int rpt_max, rpt_exp;
2941 int fmt_buffer_size;
2942 int trk_per_cyl;
2943 int trkcount;
2944 int tpm = 0;
2945 int rc;
2946
2947 trk_per_cyl = private->rdc_data.trk_per_cyl;
2948
2949
2950 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
2951 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
2952
2953 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
2954 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
2955
2956 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
2957 if (!fmt_buffer)
2958 return -ENOMEM;
2959
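	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */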
2968 if ((private->features.feature[40] & 0x04) &&
2969 fmt_buffer_size <= private->fcx_max_data)
2970 tpm = 1;
2971
2972 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
2973 tpm, fmt_buffer, rpt_max, &irb);
2974 if (rc && rc != -EIO)
2975 goto out;
2976 if (rc == -EIO) {
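		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */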
2982 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
2983 tpm = 0;
2984 rc = dasd_eckd_format_process_data(base, &cdata->expect,
2985 enable_pav, tpm,
2986 fmt_buffer, rpt_max,
2987 &irb);
2988 if (rc)
2989 goto out;
2990 } else {
2991 goto out;
2992 }
2993 }
2994
2995 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
2996 trk_per_cyl, tpm);
2997
2998out:
2999 kfree(fmt_buffer);
3000
3001 return rc;
3002}
3003
3004static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3005{
3006 cqr->status = DASD_CQR_FILLED;
3007 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3008 dasd_eckd_reset_ccw_to_base_io(cqr);
3009 cqr->startdev = cqr->block->base;
3010 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3011 }
}
3013
3014static dasd_erp_fn_t
3015dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3016{
3017 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3018 struct ccw_device *cdev = device->cdev;
3019
3020 switch (cdev->id.cu_type) {
3021 case 0x3990:
3022 case 0x2105:
3023 case 0x2107:
3024 case 0x1750:
3025 return dasd_3990_erp_action;
3026 case 0x9343:
3027 case 0x3880:
3028 default:
3029 return dasd_default_erp_action;
3030 }
3031}
3032
3033static dasd_erp_fn_t
3034dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3035{
3036 return dasd_default_erp_postaction;
3037}
3038
3039static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3040 struct dasd_ccw_req *cqr,
3041 struct irb *irb)
3042{
3043 char mask;
3044 char *sense = NULL;
3045 struct dasd_eckd_private *private;
3046
3047 private = (struct dasd_eckd_private *) device->private;
3048
3049 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3050 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3051
3052
3053
3054
3055 if (!device->block && private->lcu &&
3056 device->state == DASD_STATE_ONLINE &&
3057 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3058 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3059
3060
3061
3062
3063
3064
3065 dasd_alias_remove_device(device);
3066
3067
3068 dasd_reload_device(device);
3069 }
3070 dasd_generic_handle_state_change(device);
3071 return;
3072 }
3073
3074 sense = dasd_get_sense(irb);
3075 if (!sense)
3076 return;
3077
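	/* summary unit check */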
3079 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3080 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3081 dasd_alias_handle_summary_unit_check(device, irb);
3082 return;
3083 }
3084
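	/* service information message SIM */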
3086 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3087 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3088 dasd_3990_erp_handle_sim(device, sense);
3089 return;
3090 }
3091
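	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */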
3095 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3096 (sense[7] == 0x3F) &&
3097 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3098 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3099 if (device->features & DASD_FEATURE_FAILONSLCK)
3100 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3101 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3102 dev_err(&device->cdev->dev,
3103 "The device reservation was lost\n");
3104 }
3105}
3106
3107static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3108 struct dasd_device *startdev,
3109 struct dasd_block *block,
3110 struct request *req,
3111 sector_t first_rec,
3112 sector_t last_rec,
3113 sector_t first_trk,
3114 sector_t last_trk,
3115 unsigned int first_offs,
3116 unsigned int last_offs,
3117 unsigned int blk_per_trk,
3118 unsigned int blksize)
3119{
3120 struct dasd_eckd_private *private;
3121 unsigned long *idaws;
3122 struct LO_eckd_data *LO_data;
3123 struct dasd_ccw_req *cqr;
3124 struct ccw1 *ccw;
3125 struct req_iterator iter;
3126 struct bio_vec *bv;
3127 char *dst;
3128 unsigned int off;
3129 int count, cidaw, cplength, datasize;
3130 sector_t recid;
3131 unsigned char cmd, rcmd;
3132 int use_prefix;
3133 struct dasd_device *basedev;
3134
3135 basedev = block->base;
3136 private = (struct dasd_eckd_private *) basedev->private;
3137 if (rq_data_dir(req) == READ)
3138 cmd = DASD_ECKD_CCW_READ_MT;
3139 else if (rq_data_dir(req) == WRITE)
3140 cmd = DASD_ECKD_CCW_WRITE_MT;
3141 else
3142 return ERR_PTR(-EINVAL);
3143
3144
3145 count = 0;
3146 cidaw = 0;
3147 rq_for_each_segment(bv, req, iter) {
3148 if (bv->bv_len & (blksize - 1))
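			/* Eckd can only do full blocks. */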
3150 return ERR_PTR(-EINVAL);
3151 count += bv->bv_len >> (block->s2b_shift + 9);
3152#if defined(CONFIG_64BIT)
3153 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
3154 cidaw += bv->bv_len >> (block->s2b_shift + 9);
3155#endif
3156 }
3157
3158 if (count != last_rec - first_rec + 1)
3159 return ERR_PTR(-EINVAL);
3160
3161
3162 use_prefix = private->features.feature[8] & 0x01;
3163 if (use_prefix) {
3164
3165 cplength = 2 + count;
3166
3167 datasize = sizeof(struct PFX_eckd_data) +
3168 sizeof(struct LO_eckd_data) +
3169 cidaw * sizeof(unsigned long);
3170 } else {
3171
3172 cplength = 2 + count;
3173
3174 datasize = sizeof(struct DE_eckd_data) +
3175 sizeof(struct LO_eckd_data) +
3176 cidaw * sizeof(unsigned long);
3177 }
3178
3179 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3180 if (last_rec >= 2*blk_per_trk)
3181 count = 2*blk_per_trk - first_rec;
3182 cplength += count;
3183 datasize += count*sizeof(struct LO_eckd_data);
3184 }
3185
3186 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3187 startdev);
3188 if (IS_ERR(cqr))
3189 return cqr;
3190 ccw = cqr->cpaddr;
3191
3192 if (use_prefix) {
3193 if (prefix(ccw++, cqr->data, first_trk,
3194 last_trk, cmd, basedev, startdev) == -EAGAIN) {
3195
3196
3197
3198 dasd_sfree_request(cqr, startdev);
3199 return ERR_PTR(-EAGAIN);
3200 }
3201 idaws = (unsigned long *) (cqr->data +
3202 sizeof(struct PFX_eckd_data));
3203 } else {
3204 if (define_extent(ccw++, cqr->data, first_trk,
3205 last_trk, cmd, basedev) == -EAGAIN) {
3206
3207
3208
3209 dasd_sfree_request(cqr, startdev);
3210 return ERR_PTR(-EAGAIN);
3211 }
3212 idaws = (unsigned long *) (cqr->data +
3213 sizeof(struct DE_eckd_data));
3214 }
3215
3216 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3217 recid = first_rec;
3218 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3219
3220 ccw[-1].flags |= CCW_FLAG_CC;
3221 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3222 last_rec - recid + 1, cmd, basedev, blksize);
3223 }
3224 rq_for_each_segment(bv, req, iter) {
3225 dst = page_address(bv->bv_page) + bv->bv_offset;
3226 if (dasd_page_cache) {
3227 char *copy = kmem_cache_alloc(dasd_page_cache,
3228 GFP_DMA | __GFP_NOWARN);
3229 if (copy && rq_data_dir(req) == WRITE)
3230 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
3231 if (copy)
3232 dst = copy + bv->bv_offset;
3233 }
3234 for (off = 0; off < bv->bv_len; off += blksize) {
3235 sector_t trkid = recid;
3236 unsigned int recoffs = sector_div(trkid, blk_per_trk);
3237 rcmd = cmd;
3238 count = blksize;
3239
3240 if (private->uses_cdl && recid < 2*blk_per_trk) {
3241 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
3242 rcmd |= 0x8;
3243 count = dasd_eckd_cdl_reclen(recid);
3244 if (count < blksize &&
3245 rq_data_dir(req) == READ)
3246 memset(dst + count, 0xe5,
3247 blksize - count);
3248 }
3249 ccw[-1].flags |= CCW_FLAG_CC;
3250 locate_record(ccw++, LO_data++,
3251 trkid, recoffs + 1,
3252 1, rcmd, basedev, count);
3253 }
3254
3255 if (private->uses_cdl && recid == 2*blk_per_trk) {
3256 ccw[-1].flags |= CCW_FLAG_CC;
3257 locate_record(ccw++, LO_data++,
3258 trkid, recoffs + 1,
3259 last_rec - recid + 1,
3260 cmd, basedev, count);
3261 }
3262
3263 ccw[-1].flags |= CCW_FLAG_CC;
3264 ccw->cmd_code = rcmd;
3265 ccw->count = count;
3266 if (idal_is_needed(dst, blksize)) {
3267 ccw->cda = (__u32)(addr_t) idaws;
3268 ccw->flags = CCW_FLAG_IDA;
3269 idaws = idal_create_words(idaws, dst, blksize);
3270 } else {
3271 ccw->cda = (__u32)(addr_t) dst;
3272 ccw->flags = 0;
3273 }
3274 ccw++;
3275 dst += blksize;
3276 recid++;
3277 }
3278 }
3279 if (blk_noretry_request(req) ||
3280 block->base->features & DASD_FEATURE_FAILFAST)
3281 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3282 cqr->startdev = startdev;
3283 cqr->memdev = startdev;
3284 cqr->block = block;
3285 cqr->expires = startdev->default_expires * HZ;
3286 cqr->lpm = dasd_path_get_ppm(startdev);
3287 cqr->retries = 256;
3288 cqr->buildclk = get_tod_clock();
3289 cqr->status = DASD_CQR_FILLED;
3290 return cqr;
3291}
3292
3293static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
3294 struct dasd_device *startdev,
3295 struct dasd_block *block,
3296 struct request *req,
3297 sector_t first_rec,
3298 sector_t last_rec,
3299 sector_t first_trk,
3300 sector_t last_trk,
3301 unsigned int first_offs,
3302 unsigned int last_offs,
3303 unsigned int blk_per_trk,
3304 unsigned int blksize)
3305{
3306 unsigned long *idaws;
3307 struct dasd_ccw_req *cqr;
3308 struct ccw1 *ccw;
3309 struct req_iterator iter;
3310 struct bio_vec *bv;
3311 char *dst, *idaw_dst;
3312 unsigned int cidaw, cplength, datasize;
3313 unsigned int tlf;
3314 sector_t recid;
3315 unsigned char cmd;
3316 struct dasd_device *basedev;
3317 unsigned int trkcount, count, count_to_trk_end;
3318 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
3319 unsigned char new_track, end_idaw;
3320 sector_t trkid;
3321 unsigned int recoffs;
3322
3323 basedev = block->base;
3324 if (rq_data_dir(req) == READ)
3325 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3326 else if (rq_data_dir(req) == WRITE)
3327 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3328 else
3329 return ERR_PTR(-EINVAL);
3330
3331
3332
3333
3334
3335
3336 cidaw = last_rec - first_rec + 1;
3337 trkcount = last_trk - first_trk + 1;
3338
3339
3340 cplength = 1 + trkcount;
3341
3342
3343
3344
3345 datasize = sizeof(struct PFX_eckd_data) +
3346 cidaw * sizeof(unsigned long long);
3347
3348
3349 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3350 startdev);
3351 if (IS_ERR(cqr))
3352 return cqr;
3353 ccw = cqr->cpaddr;
3354
3355 if (first_trk == last_trk)
3356 tlf = last_offs - first_offs + 1;
3357 else
3358 tlf = last_offs + 1;
3359 tlf *= blksize;
3360
3361 if (prefix_LRE(ccw++, cqr->data, first_trk,
3362 last_trk, cmd, basedev, startdev,
3363 1 , first_offs + 1,
3364 trkcount, blksize,
3365 tlf) == -EAGAIN) {
3366
3367
3368
3369 dasd_sfree_request(cqr, startdev);
3370 return ERR_PTR(-EAGAIN);
3371 }
3372
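	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and its idal ends with the track boundaries
	 */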
3380 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
3381 recid = first_rec;
3382 new_track = 1;
3383 end_idaw = 0;
3384 len_to_track_end = 0;
3385 idaw_dst = NULL;
3386 idaw_len = 0;
3387 rq_for_each_segment(bv, req, iter) {
3388 dst = page_address(bv->bv_page) + bv->bv_offset;
3389 seg_len = bv->bv_len;
3390 while (seg_len) {
3391 if (new_track) {
3392 trkid = recid;
3393 recoffs = sector_div(trkid, blk_per_trk);
3394 count_to_trk_end = blk_per_trk - recoffs;
3395 count = min((last_rec - recid + 1),
3396 (sector_t)count_to_trk_end);
3397 len_to_track_end = count * blksize;
3398 ccw[-1].flags |= CCW_FLAG_CC;
3399 ccw->cmd_code = cmd;
3400 ccw->count = len_to_track_end;
3401 ccw->cda = (__u32)(addr_t)idaws;
3402 ccw->flags = CCW_FLAG_IDA;
3403 ccw++;
3404 recid += count;
3405 new_track = 0;
3406
3407 if (!idaw_dst)
3408 idaw_dst = dst;
3409 }
3410
3411
3412
3413
3414
3415
3416 if (!idaw_dst) {
3417 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
3418 dasd_sfree_request(cqr, startdev);
3419 return ERR_PTR(-ERANGE);
3420 } else
3421 idaw_dst = dst;
3422 }
3423 if ((idaw_dst + idaw_len) != dst) {
3424 dasd_sfree_request(cqr, startdev);
3425 return ERR_PTR(-ERANGE);
3426 }
3427 part_len = min(seg_len, len_to_track_end);
3428 seg_len -= part_len;
3429 dst += part_len;
3430 idaw_len += part_len;
3431 len_to_track_end -= part_len;
3432
3433
3434
3435
3436
3437 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
3438 end_idaw = 1;
3439
3440 if (!len_to_track_end) {
3441 new_track = 1;
3442 end_idaw = 1;
3443 }
3444 if (end_idaw) {
3445 idaws = idal_create_words(idaws, idaw_dst,
3446 idaw_len);
3447 idaw_dst = NULL;
3448 idaw_len = 0;
3449 end_idaw = 0;
3450 }
3451 }
3452 }
3453
3454 if (blk_noretry_request(req) ||
3455 block->base->features & DASD_FEATURE_FAILFAST)
3456 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3457 cqr->startdev = startdev;
3458 cqr->memdev = startdev;
3459 cqr->block = block;
3460 cqr->expires = startdev->default_expires * HZ;
3461 cqr->lpm = dasd_path_get_ppm(startdev);
3462 cqr->retries = 256;
3463 cqr->buildclk = get_tod_clock();
3464 cqr->status = DASD_CQR_FILLED;
3465 return cqr;
3466}
3467
3468static int prepare_itcw(struct itcw *itcw,
3469 unsigned int trk, unsigned int totrk, int cmd,
3470 struct dasd_device *basedev,
3471 struct dasd_device *startdev,
3472 unsigned int rec_on_trk, int count,
3473 unsigned int blksize,
3474 unsigned int total_data_size,
3475 unsigned int tlf,
3476 unsigned int blk_per_trk)
3477{
3478 struct PFX_eckd_data pfxdata;
3479 struct dasd_eckd_private *basepriv, *startpriv;
3480 struct DE_eckd_data *dedata;
3481 struct LRE_eckd_data *lredata;
3482 struct dcw *dcw;
3483
3484 u32 begcyl, endcyl;
3485 u16 heads, beghead, endhead;
3486 u8 pfx_cmd;
3487
3488 int rc = 0;
3489 int sector = 0;
3490 int dn, d;
3491
3492
3493
3494 basepriv = (struct dasd_eckd_private *) basedev->private;
3495 startpriv = (struct dasd_eckd_private *) startdev->private;
3496 dedata = &pfxdata.define_extent;
3497 lredata = &pfxdata.locate_record;
3498
3499 memset(&pfxdata, 0, sizeof(pfxdata));
3500 pfxdata.format = 1;
3501 pfxdata.base_address = basepriv->ned->unit_addr;
3502 pfxdata.base_lss = basepriv->ned->ID;
3503 pfxdata.validity.define_extent = 1;
3504
3505
3506 if (startpriv->uid.type != UA_BASE_DEVICE) {
3507 pfxdata.validity.verify_base = 1;
3508 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
3509 pfxdata.validity.hyper_pav = 1;
3510 }
3511
3512 switch (cmd) {
3513 case DASD_ECKD_CCW_READ_TRACK_DATA:
3514 dedata->mask.perm = 0x1;
3515 dedata->attributes.operation = basepriv->attrib.operation;
3516 dedata->blk_size = blksize;
3517 dedata->ga_extended |= 0x42;
3518 lredata->operation.orientation = 0x0;
3519 lredata->operation.operation = 0x0C;
3520 lredata->auxiliary.check_bytes = 0x01;
3521 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
3522 break;
3523 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
3524 dedata->mask.perm = 0x02;
3525 dedata->attributes.operation = basepriv->attrib.operation;
3526 dedata->blk_size = blksize;
3527 rc = check_XRC_on_prefix(&pfxdata, basedev);
3528 dedata->ga_extended |= 0x42;
3529 lredata->operation.orientation = 0x0;
3530 lredata->operation.operation = 0x3F;
3531 lredata->extended_operation = 0x23;
3532 lredata->auxiliary.check_bytes = 0x2;
3533 pfx_cmd = DASD_ECKD_CCW_PFX;
3534 break;
3535 case DASD_ECKD_CCW_READ_COUNT_MT:
3536 dedata->mask.perm = 0x1;
3537 dedata->attributes.operation = DASD_BYPASS_CACHE;
3538 dedata->ga_extended |= 0x42;
3539 dedata->blk_size = blksize;
3540 lredata->operation.orientation = 0x2;
3541 lredata->operation.operation = 0x16;
3542 lredata->auxiliary.check_bytes = 0x01;
3543 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
3544 break;
3545 default:
3546 DBF_DEV_EVENT(DBF_ERR, basedev,
3547 "prepare itcw, unknown opcode 0x%x", cmd);
3548 BUG();
3549 break;
3550 }
3551 if (rc)
3552 return rc;
3553
3554 dedata->attributes.mode = 0x3;
3555
3556 heads = basepriv->rdc_data.trk_per_cyl;
3557 begcyl = trk / heads;
3558 beghead = trk % heads;
3559 endcyl = totrk / heads;
3560 endhead = totrk % heads;
3561
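	/* check for sequential prestage - enhance cylinder range */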
3563 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
3564 dedata->attributes.operation == DASD_SEQ_ACCESS) {
3565
3566 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
3567 endcyl += basepriv->attrib.nr_cyl;
3568 else
3569 endcyl = (basepriv->real_cyl - 1);
3570 }
3571
3572 set_ch_t(&dedata->beg_ext, begcyl, beghead);
3573 set_ch_t(&dedata->end_ext, endcyl, endhead);
3574
3575 dedata->ep_format = 0x20;
3576 dedata->ep_rec_per_track = blk_per_trk;
3577
3578 if (rec_on_trk) {
3579 switch (basepriv->rdc_data.dev_type) {
3580 case 0x3390:
3581 dn = ceil_quot(blksize + 6, 232);
3582 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
3583 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
3584 break;
3585 case 0x3380:
3586 d = 7 + ceil_quot(blksize + 12, 32);
3587 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
3588 break;
3589 }
3590 }
3591
3592 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
3593 lredata->auxiliary.length_valid = 0;
3594 lredata->auxiliary.length_scope = 0;
3595 lredata->sector = 0xff;
3596 } else {
3597 lredata->auxiliary.length_valid = 1;
3598 lredata->auxiliary.length_scope = 1;
3599 lredata->sector = sector;
3600 }
3601 lredata->auxiliary.imbedded_ccw_valid = 1;
3602 lredata->length = tlf;
3603 lredata->imbedded_ccw = cmd;
3604 lredata->count = count;
3605 set_ch_t(&lredata->seek_addr, begcyl, beghead);
3606 lredata->search_arg.cyl = lredata->seek_addr.cyl;
3607 lredata->search_arg.head = lredata->seek_addr.head;
3608 lredata->search_arg.record = rec_on_trk;
3609
3610 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
3611 &pfxdata, sizeof(pfxdata), total_data_size);
3612 return IS_ERR(dcw) ? PTR_ERR(dcw) : 0;
3613}
3614
3615static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3616 struct dasd_device *startdev,
3617 struct dasd_block *block,
3618 struct request *req,
3619 sector_t first_rec,
3620 sector_t last_rec,
3621 sector_t first_trk,
3622 sector_t last_trk,
3623 unsigned int first_offs,
3624 unsigned int last_offs,
3625 unsigned int blk_per_trk,
3626 unsigned int blksize)
3627{
3628 struct dasd_ccw_req *cqr;
3629 struct req_iterator iter;
3630 struct bio_vec *bv;
3631 char *dst;
3632 unsigned int trkcount, ctidaw;
3633 unsigned char cmd;
3634 struct dasd_device *basedev;
3635 unsigned int tlf;
3636 struct itcw *itcw;
3637 struct tidaw *last_tidaw = NULL;
3638 int itcw_op;
3639 size_t itcw_size;
3640 u8 tidaw_flags;
3641 unsigned int seg_len, part_len, len_to_track_end;
3642 unsigned char new_track;
3643 sector_t recid, trkid;
3644 unsigned int offs;
3645 unsigned int count, count_to_trk_end;
3646 int ret;
3647
3648 basedev = block->base;
3649 if (rq_data_dir(req) == READ) {
3650 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3651 itcw_op = ITCW_OP_READ;
3652 } else if (rq_data_dir(req) == WRITE) {
3653 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3654 itcw_op = ITCW_OP_WRITE;
3655 } else
3656 return ERR_PTR(-EINVAL);
3657
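	/* Track based I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */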
3664 trkcount = last_trk - first_trk + 1;
3665 ctidaw = 0;
3666 rq_for_each_segment(bv, req, iter) {
3667 ++ctidaw;
3668 }
3669 if (rq_data_dir(req) == WRITE)
3670 ctidaw += (last_trk - first_trk);
3671
3672
3673 itcw_size = itcw_calc_size(0, ctidaw, 0);
3674 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
3675 if (IS_ERR(cqr))
3676 return cqr;
3677
3678
3679 if (first_trk == last_trk)
3680 tlf = last_offs - first_offs + 1;
3681 else
3682 tlf = last_offs + 1;
3683 tlf *= blksize;
3684
3685 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
3686 if (IS_ERR(itcw)) {
3687 ret = -EINVAL;
3688 goto out_error;
3689 }
3690 cqr->cpaddr = itcw_get_tcw(itcw);
3691 if (prepare_itcw(itcw, first_trk, last_trk,
3692 cmd, basedev, startdev,
3693 first_offs + 1,
3694 trkcount, blksize,
3695 (last_rec - first_rec + 1) * blksize,
3696 tlf, blk_per_trk) == -EAGAIN) {
3697
3698
3699
3700 ret = -EAGAIN;
3701 goto out_error;
3702 }
3703 len_to_track_end = 0;
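	/*
	 * A tidaw can address 4k of memory, but must not cross page
	 * boundaries. The block layer takes care of this by splitting
	 * segments at page boundaries and limiting the segment size.
	 * For write requests, a TIDAW must not cross track boundaries,
	 * because we have to set the CBC flag on the last tidaw for
	 * each track.
	 */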
3712 if (rq_data_dir(req) == WRITE) {
3713 new_track = 1;
3714 recid = first_rec;
3715 rq_for_each_segment(bv, req, iter) {
3716 dst = page_address(bv->bv_page) + bv->bv_offset;
3717 seg_len = bv->bv_len;
3718 while (seg_len) {
3719 if (new_track) {
3720 trkid = recid;
3721 offs = sector_div(trkid, blk_per_trk);
3722 count_to_trk_end = blk_per_trk - offs;
3723 count = min((last_rec - recid + 1),
3724 (sector_t)count_to_trk_end);
3725 len_to_track_end = count * blksize;
3726 recid += count;
3727 new_track = 0;
3728 }
3729 part_len = min(seg_len, len_to_track_end);
3730 seg_len -= part_len;
3731 len_to_track_end -= part_len;
3732
3733 if (!len_to_track_end) {
3734 new_track = 1;
3735 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
3736 } else
3737 tidaw_flags = 0;
3738 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
3739 dst, part_len);
3740 if (IS_ERR(last_tidaw)) {
3741 ret = -EINVAL;
3742 goto out_error;
3743 }
3744 dst += part_len;
3745 }
3746 }
3747 } else {
3748 rq_for_each_segment(bv, req, iter) {
3749 dst = page_address(bv->bv_page) + bv->bv_offset;
3750 last_tidaw = itcw_add_tidaw(itcw, 0x00,
3751 dst, bv->bv_len);
3752 if (IS_ERR(last_tidaw)) {
3753 ret = -EINVAL;
3754 goto out_error;
3755 }
3756 }
3757 }
3758 last_tidaw->flags |= TIDAW_FLAGS_LAST;
3759 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
3760 itcw_finalize(itcw);
3761
3762 if (blk_noretry_request(req) ||
3763 block->base->features & DASD_FEATURE_FAILFAST)
3764 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3765 cqr->cpmode = 1;
3766 cqr->startdev = startdev;
3767 cqr->memdev = startdev;
3768 cqr->block = block;
3769 cqr->expires = startdev->default_expires * HZ;
3770 cqr->lpm = dasd_path_get_ppm(startdev);
3771 cqr->retries = 256;
3772 cqr->buildclk = get_tod_clock();
3773 cqr->status = DASD_CQR_FILLED;
3774 return cqr;
3775out_error:
3776 dasd_sfree_request(cqr, startdev);
3777 return ERR_PTR(ret);
3778}
3779
3780static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
3781 struct dasd_block *block,
3782 struct request *req)
3783{
3784 int cmdrtd, cmdwtd;
3785 int use_prefix;
3786 int fcx_multitrack;
3787 struct dasd_eckd_private *private;
3788 struct dasd_device *basedev;
3789 sector_t first_rec, last_rec;
3790 sector_t first_trk, last_trk;
3791 unsigned int first_offs, last_offs;
3792 unsigned int blk_per_trk, blksize;
3793 int cdlspecial;
3794 unsigned int data_size;
3795 struct dasd_ccw_req *cqr;
3796
3797 basedev = block->base;
3798 private = (struct dasd_eckd_private *) basedev->private;
3799
3800
3801 blksize = block->bp_block;
3802 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3803 if (blk_per_trk == 0)
3804 return ERR_PTR(-EINVAL);
3805
3806 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
3807 first_offs = sector_div(first_trk, blk_per_trk);
3808 last_rec = last_trk =
3809 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3810 last_offs = sector_div(last_trk, blk_per_trk);
3811 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
3812
3813 fcx_multitrack = private->features.feature[40] & 0x20;
3814 data_size = blk_rq_bytes(req);
3815 if (data_size % blksize)
3816 return ERR_PTR(-EINVAL);
3817
3818 if (rq_data_dir(req) == WRITE)
3819 data_size += (last_trk - first_trk) * 4;
3820
3821
3822 cmdrtd = private->features.feature[9] & 0x20;
3823 cmdwtd = private->features.feature[12] & 0x40;
3824 use_prefix = private->features.feature[8] & 0x01;
3825
3826 cqr = NULL;
3827 if (cdlspecial || dasd_page_cache) {
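		/* do nothing, just fall through to the cmd mode single case */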
3829 } else if ((data_size <= private->fcx_max_data)
3830 && (fcx_multitrack || (first_trk == last_trk))) {
3831 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
3832 first_rec, last_rec,
3833 first_trk, last_trk,
3834 first_offs, last_offs,
3835 blk_per_trk, blksize);
3836 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
3837 (PTR_ERR(cqr) != -ENOMEM))
3838 cqr = NULL;
3839 } else if (use_prefix &&
3840 (((rq_data_dir(req) == READ) && cmdrtd) ||
3841 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
3842 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
3843 first_rec, last_rec,
3844 first_trk, last_trk,
3845 first_offs, last_offs,
3846 blk_per_trk, blksize);
3847 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
3848 (PTR_ERR(cqr) != -ENOMEM))
3849 cqr = NULL;
3850 }
3851 if (!cqr)
3852 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
3853 first_rec, last_rec,
3854 first_trk, last_trk,
3855 first_offs, last_offs,
3856 blk_per_trk, blksize);
3857 return cqr;
3858}
3859
3860static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3861 struct dasd_block *block,
3862 struct request *req)
3863{
3864 unsigned long *idaws;
3865 struct dasd_device *basedev;
3866 struct dasd_ccw_req *cqr;
3867 struct ccw1 *ccw;
3868 struct req_iterator iter;
3869 struct bio_vec *bv;
3870 char *dst;
3871 unsigned char cmd;
3872 unsigned int trkcount;
3873 unsigned int seg_len, len_to_track_end;
3874 unsigned int first_offs;
3875 unsigned int cidaw, cplength, datasize;
3876 sector_t first_trk, last_trk;
3877 unsigned int pfx_datasize;
3878
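	/*
	 * raw track access needs to be multiple of 64k and on 64k boundary
	 */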
3882 if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
3883 cqr = ERR_PTR(-EINVAL);
3884 goto out;
3885 }
3886 if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
3887 DASD_RAW_SECTORS_PER_TRACK) != 0) {
3888 cqr = ERR_PTR(-EINVAL);
3889 goto out;
3890 }
3891
3892 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
3893 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
3894 DASD_RAW_SECTORS_PER_TRACK;
3895 trkcount = last_trk - first_trk + 1;
3896 first_offs = 0;
3897 basedev = block->base;
3898
3899 if (rq_data_dir(req) == READ)
3900 cmd = DASD_ECKD_CCW_READ_TRACK;
3901 else if (rq_data_dir(req) == WRITE)
3902 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
3903 else {
3904 cqr = ERR_PTR(-EINVAL);
3905 goto out;
3906 }
3907
3908
3909
3910
3911
3912 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
3913
3914
3915 cplength = 1 + trkcount;
3916
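	/*
	 * struct PFX_eckd_data carries a short extended parameter that is
	 * needed for write full track; add 8 instead of 2 bytes to keep
	 * the following IDAWs 8 byte aligned.
	 */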
3923 pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
3924
3925 datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
3926
3927
3928 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
3929 datasize, startdev);
3930 if (IS_ERR(cqr))
3931 goto out;
3932 ccw = cqr->cpaddr;
3933
3934 if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
3935 basedev, startdev, 1 , first_offs + 1,
3936 trkcount, 0, 0) == -EAGAIN) {
3937
3938
3939
3940 dasd_sfree_request(cqr, startdev);
3941 cqr = ERR_PTR(-EAGAIN);
3942 goto out;
3943 }
3944
3945 idaws = (unsigned long *)(cqr->data + pfx_datasize);
3946
3947 len_to_track_end = 0;
3948
3949 rq_for_each_segment(bv, req, iter) {
3950 dst = page_address(bv->bv_page) + bv->bv_offset;
3951 seg_len = bv->bv_len;
3952 if (!len_to_track_end) {
3953 ccw[-1].flags |= CCW_FLAG_CC;
3954 ccw->cmd_code = cmd;
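			/* maximum 3390 track size */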
3956 ccw->count = 57326;
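			/* 64k map to one track */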
3958 len_to_track_end = 65536;
3959 ccw->cda = (__u32)(addr_t)idaws;
3960 ccw->flags |= CCW_FLAG_IDA;
3961 ccw->flags |= CCW_FLAG_SLI;
3962 ccw++;
3963 }
3964 len_to_track_end -= seg_len;
3965 idaws = idal_create_words(idaws, dst, seg_len);
3966 }
3967
3968 if (blk_noretry_request(req) ||
3969 block->base->features & DASD_FEATURE_FAILFAST)
3970 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3971 cqr->startdev = startdev;
3972 cqr->memdev = startdev;
3973 cqr->block = block;
3974 cqr->expires = startdev->default_expires * HZ;
3975 cqr->lpm = dasd_path_get_ppm(startdev);
3976 cqr->retries = 256;
3977 cqr->buildclk = get_tod_clock();
3978 cqr->status = DASD_CQR_FILLED;
3979
3982out:
3983 return cqr;
3984}
3985
3986
3987static int
3988dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3989{
3990 struct dasd_eckd_private *private;
3991 struct ccw1 *ccw;
3992 struct req_iterator iter;
3993 struct bio_vec *bv;
3994 char *dst, *cda;
3995 unsigned int blksize, blk_per_trk, off;
3996 sector_t recid;
3997 int status;
3998
3999 if (!dasd_page_cache)
4000 goto out;
4001 private = (struct dasd_eckd_private *) cqr->block->base->private;
4002 blksize = cqr->block->bp_block;
4003 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4004 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4005 ccw = cqr->cpaddr;
4006
4007 ccw++;
4008 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4009 ccw++;
4010 rq_for_each_segment(bv, req, iter) {
4011 dst = page_address(bv->bv_page) + bv->bv_offset;
4012 for (off = 0; off < bv->bv_len; off += blksize) {
4013
4014 if (private->uses_cdl && recid <= 2*blk_per_trk)
4015 ccw++;
4016 if (dst) {
4017 if (ccw->flags & CCW_FLAG_IDA)
4018 cda = *((char **)((addr_t) ccw->cda));
4019 else
4020 cda = (char *)((addr_t) ccw->cda);
4021 if (dst != cda) {
4022 if (rq_data_dir(req) == READ)
4023 memcpy(dst, cda, bv->bv_len);
4024 kmem_cache_free(dasd_page_cache,
4025 (void *)((addr_t)cda & PAGE_MASK));
4026 }
4027 dst = NULL;
4028 }
4029 ccw++;
4030 recid++;
4031 }
4032 }
4033out:
4034 status = cqr->status == DASD_CQR_DONE;
4035 dasd_sfree_request(cqr, cqr->memdev);
4036 return status;
4037}
4038
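/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */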
4046void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4047{
4048 struct ccw1 *ccw;
4049 struct PFX_eckd_data *pfxdata;
4050 struct tcw *tcw;
4051 struct tccb *tccb;
4052 struct dcw *dcw;
4053
4054 if (cqr->cpmode == 1) {
4055 tcw = cqr->cpaddr;
4056 tccb = tcw_get_tccb(tcw);
4057 dcw = (struct dcw *)&tccb->tca[0];
4058 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4059 pfxdata->validity.verify_base = 0;
4060 pfxdata->validity.hyper_pav = 0;
4061 } else {
4062 ccw = cqr->cpaddr;
4063 pfxdata = cqr->data;
4064 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4065 pfxdata->validity.verify_base = 0;
4066 pfxdata->validity.hyper_pav = 0;
4067 }
4068 }
4069}
4070
4071#define DASD_ECKD_CHANQ_MAX_SIZE 4
4072
4073static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4074 struct dasd_block *block,
4075 struct request *req)
4076{
4077 struct dasd_eckd_private *private;
4078 struct dasd_device *startdev;
4079 unsigned long flags;
4080 struct dasd_ccw_req *cqr;
4081
4082 startdev = dasd_alias_get_start_dev(base);
4083 if (!startdev)
4084 startdev = base;
4085 private = (struct dasd_eckd_private *) startdev->private;
4086 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4087 return ERR_PTR(-EBUSY);
4088
4089 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4090 private->count++;
4091 if ((base->features & DASD_FEATURE_USERAW))
4092 cqr = dasd_raw_build_cp(startdev, block, req);
4093 else
4094 cqr = dasd_eckd_build_cp(startdev, block, req);
4095 if (IS_ERR(cqr))
4096 private->count--;
4097 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4098 return cqr;
4099}
4100
4101static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4102 struct request *req)
4103{
4104 struct dasd_eckd_private *private;
4105 unsigned long flags;
4106
4107 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4108 private = (struct dasd_eckd_private *) cqr->memdev->private;
4109 private->count--;
4110 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4111 return dasd_eckd_free_cp(cqr, req);
4112}
4113
4114static int
4115dasd_eckd_fill_info(struct dasd_device * device,
4116 struct dasd_information2_t * info)
4117{
4118 struct dasd_eckd_private *private;
4119
4120 private = (struct dasd_eckd_private *) device->private;
4121 info->label_block = 2;
4122 info->FBA_layout = private->uses_cdl ? 0 : 1;
4123 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4124 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
4125 memcpy(info->characteristics, &private->rdc_data,
4126 sizeof(struct dasd_eckd_characteristics));
4127 info->confdata_size = min((unsigned long)private->conf_len,
4128 sizeof(info->configuration_data));
4129 memcpy(info->configuration_data, private->conf_data,
4130 info->confdata_size);
4131 return 0;
4132}
4133
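/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */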
4143static int
4144dasd_eckd_release(struct dasd_device *device)
4145{
4146 struct dasd_ccw_req *cqr;
4147 int rc;
4148 struct ccw1 *ccw;
4149 int useglobal;
4150
4151 if (!capable(CAP_SYS_ADMIN))
4152 return -EACCES;
4153
4154 useglobal = 0;
4155 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
4156 if (IS_ERR(cqr)) {
4157 mutex_lock(&dasd_reserve_mutex);
4158 useglobal = 1;
4159 cqr = &dasd_reserve_req->cqr;
4160 memset(cqr, 0, sizeof(*cqr));
4161 memset(&dasd_reserve_req->ccw, 0,
4162 sizeof(dasd_reserve_req->ccw));
4163 cqr->cpaddr = &dasd_reserve_req->ccw;
4164 cqr->data = &dasd_reserve_req->data;
4165 cqr->magic = DASD_ECKD_MAGIC;
4166 }
4167 ccw = cqr->cpaddr;
4168 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4169 ccw->flags |= CCW_FLAG_SLI;
4170 ccw->count = 32;
4171 ccw->cda = (__u32)(addr_t) cqr->data;
4172 cqr->startdev = device;
4173 cqr->memdev = device;
4174 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4175 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4176 cqr->retries = 2;
4177 cqr->expires = 2 * HZ;
4178 cqr->buildclk = get_tod_clock();
4179 cqr->status = DASD_CQR_FILLED;
4180
4181 rc = dasd_sleep_on_immediatly(cqr);
4182 if (!rc)
4183 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4184
4185 if (useglobal)
4186 mutex_unlock(&dasd_reserve_mutex);
4187 else
4188 dasd_sfree_request(cqr, cqr->memdev);
4189 return rc;
4190}
4191
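/*
 * Reserve device ioctl.
 * Builds a channel program to reserve the device for the issuing host.
 * The request is allowed to fail fast and is terminated if the
 * interrupt is outstanding for a certain time.
 */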
4198static int
4199dasd_eckd_reserve(struct dasd_device *device)
4200{
4201 struct dasd_ccw_req *cqr;
4202 int rc;
4203 struct ccw1 *ccw;
4204 int useglobal;
4205
4206 if (!capable(CAP_SYS_ADMIN))
4207 return -EACCES;
4208
4209 useglobal = 0;
4210 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
4211 if (IS_ERR(cqr)) {
4212 mutex_lock(&dasd_reserve_mutex);
4213 useglobal = 1;
4214 cqr = &dasd_reserve_req->cqr;
4215 memset(cqr, 0, sizeof(*cqr));
4216 memset(&dasd_reserve_req->ccw, 0,
4217 sizeof(dasd_reserve_req->ccw));
4218 cqr->cpaddr = &dasd_reserve_req->ccw;
4219 cqr->data = &dasd_reserve_req->data;
4220 cqr->magic = DASD_ECKD_MAGIC;
4221 }
4222 ccw = cqr->cpaddr;
4223 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
4224 ccw->flags |= CCW_FLAG_SLI;
4225 ccw->count = 32;
4226 ccw->cda = (__u32)(addr_t) cqr->data;
4227 cqr->startdev = device;
4228 cqr->memdev = device;
4229 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4230 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4231 cqr->retries = 2;
4232 cqr->expires = 2 * HZ;
4233 cqr->buildclk = get_tod_clock();
4234 cqr->status = DASD_CQR_FILLED;
4235
4236 rc = dasd_sleep_on_immediatly(cqr);
4237 if (!rc)
4238 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4239
4240 if (useglobal)
4241 mutex_unlock(&dasd_reserve_mutex);
4242 else
4243 dasd_sfree_request(cqr, cqr->memdev);
4244 return rc;
4245}
4246
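/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */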
4252static int
4253dasd_eckd_steal_lock(struct dasd_device *device)
4254{
4255 struct dasd_ccw_req *cqr;
4256 int rc;
4257 struct ccw1 *ccw;
4258 int useglobal;
4259
4260 if (!capable(CAP_SYS_ADMIN))
4261 return -EACCES;
4262
4263 useglobal = 0;
4264 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
4265 if (IS_ERR(cqr)) {
4266 mutex_lock(&dasd_reserve_mutex);
4267 useglobal = 1;
4268 cqr = &dasd_reserve_req->cqr;
4269 memset(cqr, 0, sizeof(*cqr));
4270 memset(&dasd_reserve_req->ccw, 0,
4271 sizeof(dasd_reserve_req->ccw));
4272 cqr->cpaddr = &dasd_reserve_req->ccw;
4273 cqr->data = &dasd_reserve_req->data;
4274 cqr->magic = DASD_ECKD_MAGIC;
4275 }
4276 ccw = cqr->cpaddr;
4277 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
4278 ccw->flags |= CCW_FLAG_SLI;
4279 ccw->count = 32;
4280 ccw->cda = (__u32)(addr_t) cqr->data;
4281 cqr->startdev = device;
4282 cqr->memdev = device;
4283 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4284 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4285 cqr->retries = 2;
4286 cqr->expires = 2 * HZ;
4287 cqr->buildclk = get_tod_clock();
4288 cqr->status = DASD_CQR_FILLED;
4289
4290 rc = dasd_sleep_on_immediatly(cqr);
4291 if (!rc)
4292 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4293
4294 if (useglobal)
4295 mutex_unlock(&dasd_reserve_mutex);
4296 else
4297 dasd_sfree_request(cqr, cqr->memdev);
4298 return rc;
4299}
4300
4301
4302
4303
4304
4305
4306
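/*
 * SNID - Sense Path Group ID ioctl.
 * Reads the path group state for the paths given in the user supplied
 * path mask; allowed even while the device is reserved by another
 * system (DASD_CQR_ALLOW_SLOCK is set).
 */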
4307static int dasd_eckd_snid(struct dasd_device *device,
4308 void __user *argp)
4309{
4310 struct dasd_ccw_req *cqr;
4311 int rc;
4312 struct ccw1 *ccw;
4313 int useglobal;
4314 struct dasd_snid_ioctl_data usrparm;
4315
4316 if (!capable(CAP_SYS_ADMIN))
4317 return -EACCES;
4318
4319 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
4320 return -EFAULT;
4321
4322 useglobal = 0;
4323 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
4324 sizeof(struct dasd_snid_data), device);
4325 if (IS_ERR(cqr)) {
4326 mutex_lock(&dasd_reserve_mutex);
4327 useglobal = 1;
4328 cqr = &dasd_reserve_req->cqr;
4329 memset(cqr, 0, sizeof(*cqr));
4330 memset(&dasd_reserve_req->ccw, 0,
4331 sizeof(dasd_reserve_req->ccw));
4332 cqr->cpaddr = &dasd_reserve_req->ccw;
4333 cqr->data = &dasd_reserve_req->data;
4334 cqr->magic = DASD_ECKD_MAGIC;
4335 }
4336 ccw = cqr->cpaddr;
4337 ccw->cmd_code = DASD_ECKD_CCW_SNID;
4338 ccw->flags |= CCW_FLAG_SLI;
4339 ccw->count = 12;
4340 ccw->cda = (__u32)(addr_t) cqr->data;
4341 cqr->startdev = device;
4342 cqr->memdev = device;
4343 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4344 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4345 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
4346 cqr->retries = 5;
4347 cqr->expires = 10 * HZ;
4348 cqr->buildclk = get_tod_clock();
4349 cqr->status = DASD_CQR_FILLED;
4350 cqr->lpm = usrparm.path_mask;
4351
4352 rc = dasd_sleep_on_immediatly(cqr);
4353
4354 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
4355 rc = -EIO;
4356 if (!rc) {
4357 usrparm.data = *((struct dasd_snid_data *)cqr->data);
4358 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
4359 rc = -EFAULT;
4360 }
4361
4362 if (useglobal)
4363 mutex_unlock(&dasd_reserve_mutex);
4364 else
4365 dasd_sfree_request(cqr, cqr->memdev);
4366 return rc;
4367}
4368
4369
4370
4371
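/*
 * Read performance statistics via Perform Subsystem Function /
 * Read Subsystem Data and copy the result to user space.
 */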
4372static int
4373dasd_eckd_performance(struct dasd_device *device, void __user *argp)
4374{
4375 struct dasd_psf_prssd_data *prssdp;
4376 struct dasd_rssd_perf_stats_t *stats;
4377 struct dasd_ccw_req *cqr;
4378 struct ccw1 *ccw;
4379 int rc;
4380
4381 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
4382 (sizeof(struct dasd_psf_prssd_data) +
4383 sizeof(struct dasd_rssd_perf_stats_t)),
4384 device);
4385 if (IS_ERR(cqr)) {
4386 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4387 "Could not allocate initialization request");
4388 return PTR_ERR(cqr);
4389 }
4390 cqr->startdev = device;
4391 cqr->memdev = device;
4392 cqr->retries = 0;
4393 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4394 cqr->expires = 10 * HZ;
4395
4396
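 /* Prepare for Read Subsystem Data */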
4397 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
4398 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
4399 prssdp->order = PSF_ORDER_PRSSD;
4400 prssdp->suborder = 0x01;
4401 prssdp->varies[1] = 0x01;
4402
4403 ccw = cqr->cpaddr;
4404 ccw->cmd_code = DASD_ECKD_CCW_PSF;
4405 ccw->count = sizeof(struct dasd_psf_prssd_data);
4406 ccw->flags |= CCW_FLAG_CC;
4407 ccw->cda = (__u32)(addr_t) prssdp;
4408
4409
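 /* Read Subsystem Data - Performance Statistics */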
4410 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
4411 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
4412
4413 ccw++;
4414 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
4415 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
4416 ccw->cda = (__u32)(addr_t) stats;
4417
4418 cqr->buildclk = get_tod_clock();
4419 cqr->status = DASD_CQR_FILLED;
4420 rc = dasd_sleep_on(cqr);
4421 if (rc == 0) {
4422 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
4423 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
4424 if (copy_to_user(argp, stats,
4425 sizeof(struct dasd_rssd_perf_stats_t)))
4426 rc = -EFAULT;
4427 }
4428 dasd_sfree_request(cqr, cqr->memdev);
4429 return rc;
4430}
4431
4432
4433
4434
4435
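/*
 * Get attributes (cache operations).
 * Returns the caching attributes of the device (cache operation mode
 * and number of prestaged cylinders).
 */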
4436static int
4437dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
4438{
4439 struct dasd_eckd_private *private =
4440 (struct dasd_eckd_private *)device->private;
4441 struct attrib_data_t attrib = private->attrib;
4442 int rc;
4443
4444 if (!capable(CAP_SYS_ADMIN))
4445 return -EACCES;
4446 if (!argp)
4447 return -EINVAL;
4448
4449 rc = 0;
4450 if (copy_to_user(argp, (long *) &attrib,
4451 sizeof(struct attrib_data_t)))
4452 rc = -EFAULT;
4453
4454 return rc;
4455}
4456
4457
4458
4459
4460
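/*
 * Set attributes (cache operations).
 * Stores the caching attributes to be used for subsequent I/O.
 */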
4461static int
4462dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
4463{
4464 struct dasd_eckd_private *private =
4465 (struct dasd_eckd_private *)device->private;
4466 struct attrib_data_t attrib;
4467
4468 if (!capable(CAP_SYS_ADMIN))
4469 return -EACCES;
4470 if (!argp)
4471 return -EINVAL;
4472
4473 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
4474 return -EFAULT;
4475 private->attrib = attrib;
4476
4477 dev_info(&device->cdev->dev,
4478 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
4479 private->attrib.operation, private->attrib.nr_cyl);
4480 return 0;
4481}
4482
4483
4484
4485
4486
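/*
 * Issue a user supplied PSF/RSSD channel program (Symmetrix ioctl).
 * The PSF payload and the RSSD result buffer are provided by user space.
 */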
4487static int dasd_symm_io(struct dasd_device *device, void __user *argp)
4488{
4489 struct dasd_symmio_parms usrparm;
4490 char *psf_data, *rssd_result;
4491 struct dasd_ccw_req *cqr;
4492 struct ccw1 *ccw;
4493 char psf0, psf1;
4494 int rc;
4495
4496 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
4497 return -EACCES;
4498 psf0 = psf1 = 0;
4499
4500
4501 rc = -EFAULT;
4502 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
4503 goto out;
4504 if (is_compat_task() || sizeof(long) == 4) {
4505
4506 rc = -EINVAL;
4507 if ((usrparm.psf_data >> 32) != 0)
4508 goto out;
4509 if ((usrparm.rssd_result >> 32) != 0)
4510 goto out;
4511 usrparm.psf_data &= 0x7fffffffULL;
4512 usrparm.rssd_result &= 0x7fffffffULL;
4513 }
4514
4515 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
4516 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
4517 if (!psf_data || !rssd_result) {
4518 rc = -ENOMEM;
4519 goto out_free;
4520 }
4521
4522
4523 rc = -EFAULT;
4524 if (copy_from_user(psf_data,
4525 (void __user *)(unsigned long) usrparm.psf_data,
4526 usrparm.psf_data_len))
4527 goto out_free;
4528 psf0 = psf_data[0];
4529 psf1 = psf_data[1];
4530
4531
4532 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */, 0, device);
4533 if (IS_ERR(cqr)) {
4534 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4535 "Could not allocate initialization request");
4536 rc = PTR_ERR(cqr);
4537 goto out_free;
4538 }
4539
4540 cqr->startdev = device;
4541 cqr->memdev = device;
4542 cqr->retries = 3;
4543 cqr->expires = 10 * HZ;
4544 cqr->buildclk = get_tod_clock();
4545 cqr->status = DASD_CQR_FILLED;
4546
4547
4548 ccw = cqr->cpaddr;
4549
4550
4551 ccw->cmd_code = DASD_ECKD_CCW_PSF;
4552 ccw->count = usrparm.psf_data_len;
4553 ccw->flags |= CCW_FLAG_CC;
4554 ccw->cda = (__u32)(addr_t) psf_data;
4555
4556 ccw++;
4557
4558
4559 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
4560 ccw->count = usrparm.rssd_result_len;
4561 ccw->flags = CCW_FLAG_SLI;
4562 ccw->cda = (__u32)(addr_t) rssd_result;
4563
4564 rc = dasd_sleep_on(cqr);
4565 if (rc)
4566 goto out_sfree;
4567
4568 rc = -EFAULT;
4569 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
4570 rssd_result, usrparm.rssd_result_len))
4571 goto out_sfree;
4572 rc = 0;
4573
4574out_sfree:
4575 dasd_sfree_request(cqr, cqr->memdev);
4576out_free:
4577 kfree(rssd_result);
4578 kfree(psf_data);
4579out:
4580 DBF_DEV_EVENT(DBF_WARNING, device,
4581 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
4582 (int) psf0, (int) psf1, rc);
4583 return rc;
4584}
4585
4586static int
4587dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
4588{
4589 struct dasd_device *device = block->base;
4590
4591 switch (cmd) {
4592 case BIODASDGATTR:
4593 return dasd_eckd_get_attrib(device, argp);
4594 case BIODASDSATTR:
4595 return dasd_eckd_set_attrib(device, argp);
4596 case BIODASDPSRD:
4597 return dasd_eckd_performance(device, argp);
4598 case BIODASDRLSE:
4599 return dasd_eckd_release(device);
4600 case BIODASDRSRV:
4601 return dasd_eckd_reserve(device);
4602 case BIODASDSLCK:
4603 return dasd_eckd_steal_lock(device);
4604 case BIODASDSNID:
4605 return dasd_eckd_snid(device, argp);
4606 case BIODASDSYMMIO:
4607 return dasd_symm_io(device, argp);
4608 default:
4609 return -ENOTTY;
4610 }
4611}
4612
4613
4614
4615
4616
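/*
 * Dump the range of CCWs into 'page' buffer
 * and return the number of printed characters.
 */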
4617static int
4618dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
4619{
4620 int len, count;
4621 char *datap;
4622
4623 len = 0;
4624 while (from <= to) {
4625 len += sprintf(page + len, PRINTK_HEADER
4626 " CCW %p: %08X %08X DAT:",
4627 from, ((int *) from)[0], ((int *) from)[1]);
4628
4629
4630 if (from->flags & CCW_FLAG_IDA)
4631 datap = (char *) *((addr_t *) (addr_t) from->cda);
4632 else
4633 datap = (char *) ((addr_t) from->cda);
4634
4635
4636 for (count = 0; count < from->count && count < 32; count++) {
4637 if (count % 8 == 0) len += sprintf(page + len, " ");
4638 if (count % 4 == 0) len += sprintf(page + len, " ");
4639 len += sprintf(page + len, "%02x", datap[count]);
4640 }
4641 len += sprintf(page + len, "\n");
4642 from++;
4643 }
4644 return len;
4645}
4646
4647static void
4648dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
4649 char *reason)
4650{
4651 u64 *sense;
4652 u64 *stat;
4653
4654 sense = (u64 *) dasd_get_sense(irb);
4655 stat = (u64 *) &irb->scsw;
4656 if (sense) {
4657 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
4658 "%016llx %016llx %016llx %016llx",
4659 reason, *stat, *((u32 *) (stat + 1)),
4660 sense[0], sense[1], sense[2], sense[3]);
4661 } else {
4662 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
4663 reason, *stat, *((u32 *) (stat + 1)),
4664 "NO VALID SENSE");
4665 }
4666}
4667
4668
4669
4670
4671
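/*
 * Print sense data and the related channel program.
 * The output is split into parts because the printk buffer is limited.
 */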
4672static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
4673 struct dasd_ccw_req *req, struct irb *irb)
4674{
4675 char *page;
4676 struct ccw1 *first, *last, *fail, *from, *to;
4677 int len, sl, sct;
4678
4679 page = (char *) get_zeroed_page(GFP_ATOMIC);
4680 if (page == NULL) {
4681 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4682 "No memory to dump sense data\n");
4683 return;
4684 }
4685
4686 len = sprintf(page, PRINTK_HEADER
4687 " I/O status report for device %s:\n",
4688 dev_name(&device->cdev->dev));
4689 len += sprintf(page + len, PRINTK_HEADER
4690 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
4691 "CS:%02X RC:%d\n",
4692 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
4693 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
4694 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
4695 req ? req->intrc : 0);
4696 len += sprintf(page + len, PRINTK_HEADER
4697 " device %s: Failing CCW: %p\n",
4698 dev_name(&device->cdev->dev),
4699 (void *) (addr_t) irb->scsw.cmd.cpa);
4700 if (irb->esw.esw0.erw.cons) {
4701 for (sl = 0; sl < 4; sl++) {
4702 len += sprintf(page + len, PRINTK_HEADER
4703 " Sense(hex) %2d-%2d:",
4704 (8 * sl), ((8 * sl) + 7));
4705
4706 for (sct = 0; sct < 8; sct++) {
4707 len += sprintf(page + len, " %02x",
4708 irb->ecw[8 * sl + sct]);
4709 }
4710 len += sprintf(page + len, "\n");
4711 }
4712
4713 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
4714
4715 sprintf(page + len, PRINTK_HEADER
4716 " 24 Byte: %x MSG %x, "
4717 "%s MSGb to SYSOP\n",
4718 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
4719 irb->ecw[1] & 0x10 ? "" : "no");
4720 } else {
4721
4722 sprintf(page + len, PRINTK_HEADER
4723 " 32 Byte: Format: %x "
4724 "Exception class %x\n",
4725 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
4726 }
4727 } else {
4728 sprintf(page + len, PRINTK_HEADER
4729 " SORRY - NO VALID SENSE AVAILABLE\n");
4730 }
4731 printk(KERN_ERR "%s", page);
4732
4733 if (req) {
4734
4735
4736
4737 first = req->cpaddr;
4738 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
4739 to = min(first + 6, last);
4740 len = sprintf(page, PRINTK_HEADER
4741 " Related CP in req: %p\n", req);
4742 dasd_eckd_dump_ccw_range(first, to, page + len);
4743 printk(KERN_ERR "%s", page);
4744
4745
4746
4747 len = 0;
4748 from = ++to;
4749 fail = (struct ccw1 *)(addr_t)
4750 irb->scsw.cmd.cpa;
4751 if (from < fail - 2) {
4752 from = fail - 2;
4753 len += sprintf(page, PRINTK_HEADER "......\n");
4754 }
4755 to = min(fail + 1, last);
4756 len += dasd_eckd_dump_ccw_range(from, to, page + len);
4757
4758
4759 from = max(from, ++to);
4760 if (from < last - 1) {
4761 from = last - 1;
4762 len += sprintf(page + len, PRINTK_HEADER "......\n");
4763 }
4764 len += dasd_eckd_dump_ccw_range(from, last, page + len);
4765 if (len > 0)
4766 printk(KERN_ERR "%s", page);
4767 }
4768 free_page((unsigned long) page);
4769}
4770
4771
4772
4773
4774
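/*
 * Print sense data of a transport mode request, including the
 * transport status block (TSB) that belongs to the failing TCW.
 */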
4775static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4776 struct dasd_ccw_req *req, struct irb *irb)
4777{
4778 char *page;
4779 int len, sl, sct, residual;
4780 struct tsb *tsb;
4781 u8 *sense, *rcq;
4782
4783 page = (char *) get_zeroed_page(GFP_ATOMIC);
4784 if (page == NULL) {
4785 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
4786 "No memory to dump sense data");
4787 return;
4788 }
4789
4790 len = sprintf(page, PRINTK_HEADER
4791 " I/O status report for device %s:\n",
4792 dev_name(&device->cdev->dev));
4793 len += sprintf(page + len, PRINTK_HEADER
4794 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
4795 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
4796 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
4797 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
4798 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
4799 irb->scsw.tm.fcxs,
4800 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
4801 req ? req->intrc : 0);
4802 len += sprintf(page + len, PRINTK_HEADER
4803 " device %s: Failing TCW: %p\n",
4804 dev_name(&device->cdev->dev),
4805 (void *) (addr_t) irb->scsw.tm.tcw);
4806
4807 tsb = NULL;
4808 sense = NULL;
4809 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
4810 tsb = tcw_get_tsb(
4811 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
4812
4813 if (tsb) {
4814 len += sprintf(page + len, PRINTK_HEADER
4815 " tsb->length %d\n", tsb->length);
4816 len += sprintf(page + len, PRINTK_HEADER
4817 " tsb->flags %x\n", tsb->flags);
4818 len += sprintf(page + len, PRINTK_HEADER
4819 " tsb->dcw_offset %d\n", tsb->dcw_offset);
4820 len += sprintf(page + len, PRINTK_HEADER
4821 " tsb->count %d\n", tsb->count);
4822 residual = tsb->count - 28;
4823 len += sprintf(page + len, PRINTK_HEADER
4824 " residual %d\n", residual);
4825
4826 switch (tsb->flags & 0x07) {
4827 case 1:
4828 len += sprintf(page + len, PRINTK_HEADER
4829 " tsb->tsa.iostat.dev_time %d\n",
4830 tsb->tsa.iostat.dev_time);
4831 len += sprintf(page + len, PRINTK_HEADER
4832 " tsb->tsa.iostat.def_time %d\n",
4833 tsb->tsa.iostat.def_time);
4834 len += sprintf(page + len, PRINTK_HEADER
4835 " tsb->tsa.iostat.queue_time %d\n",
4836 tsb->tsa.iostat.queue_time);
4837 len += sprintf(page + len, PRINTK_HEADER
4838 " tsb->tsa.iostat.dev_busy_time %d\n",
4839 tsb->tsa.iostat.dev_busy_time);
4840 len += sprintf(page + len, PRINTK_HEADER
4841 " tsb->tsa.iostat.dev_act_time %d\n",
4842 tsb->tsa.iostat.dev_act_time);
4843 sense = tsb->tsa.iostat.sense;
4844 break;
4845 case 2:
4846 len += sprintf(page + len, PRINTK_HEADER
4847 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
4848 for (sl = 0; sl < 2; sl++) {
4849 len += sprintf(page + len, PRINTK_HEADER
4850 " tsb->tsa.ddpc.rcq %2d-%2d: ",
4851 (8 * sl), ((8 * sl) + 7));
4852 rcq = tsb->tsa.ddpc.rcq;
4853 for (sct = 0; sct < 8; sct++) {
4854 len += sprintf(page + len, " %02x",
4855 rcq[8 * sl + sct]);
4856 }
4857 len += sprintf(page + len, "\n");
4858 }
4859 sense = tsb->tsa.ddpc.sense;
4860 break;
4861 case 3:
4862 len += sprintf(page + len, PRINTK_HEADER
4863 " tsb->tsa.intrg.: not supported yet\n");
4864 break;
4865 }
4866
4867 if (sense) {
4868 for (sl = 0; sl < 4; sl++) {
4869 len += sprintf(page + len, PRINTK_HEADER
4870 " Sense(hex) %2d-%2d:",
4871 (8 * sl), ((8 * sl) + 7));
4872 for (sct = 0; sct < 8; sct++) {
4873 len += sprintf(page + len, " %02x",
4874 sense[8 * sl + sct]);
4875 }
4876 len += sprintf(page + len, "\n");
4877 }
4878
4879 if (sense[27] & DASD_SENSE_BIT_0) {
4880
4881 sprintf(page + len, PRINTK_HEADER
4882 " 24 Byte: %x MSG %x, "
4883 "%s MSGb to SYSOP\n",
4884 sense[7] >> 4, sense[7] & 0x0f,
4885 sense[1] & 0x10 ? "" : "no");
4886 } else {
4887
4888 sprintf(page + len, PRINTK_HEADER
4889 " 32 Byte: Format: %x "
4890 "Exception class %x\n",
4891 sense[6] & 0x0f, sense[22] >> 4);
4892 }
4893 } else {
4894 sprintf(page + len, PRINTK_HEADER
4895 " SORRY - NO VALID SENSE AVAILABLE\n");
4896 }
4897 } else {
4898 sprintf(page + len, PRINTK_HEADER
4899 " SORRY - NO TSB DATA AVAILABLE\n");
4900 }
4901 printk(KERN_ERR "%s", page);
4902 free_page((unsigned long) page);
4903}
4904
4905static void dasd_eckd_dump_sense(struct dasd_device *device,
4906 struct dasd_ccw_req *req, struct irb *irb)
4907{
4908 u8 *sense = dasd_get_sense(irb);
4909
4910 if (scsw_is_tm(&irb->scsw)) {
4911
4912
4913
4914
4915
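 /*
 * In some cases the 'File Protected' or 'Incorrect Length'
 * error might be expected and log messages shouldn't be written
 * then. Check if the according suppress bit is set.
 */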
4916 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
4917 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
4918 return;
4919 if (scsw_cstat(&irb->scsw) == 0x40 &&
4920 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
4921 return;
4922
4923 dasd_eckd_dump_sense_tcw(device, req, irb);
4924 } else {
4925
4926
4927
4928
4929
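 /*
 * In some cases the 'Command Reject' or 'No Record Found'
 * error might be expected and log messages shouldn't be
 * written then. Check if the according suppress bit is set.
 */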
4930 if (sense && sense[0] & SNS0_CMD_REJECT &&
4931 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
4932 return;
4933
4934 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
4935 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
4936 return;
4937
4938 dasd_eckd_dump_sense_ccw(device, req, irb);
4939 }
4940}
4941
4942static int dasd_eckd_pm_freeze(struct dasd_device *device)
4943{
4944
4945
4946
4947
4948
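 /*
 * Disconnect the device from our LCU structure for suspend;
 * on restore it is reconnected and LCU specific information
 * such as PAV support is read again.
 */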
4949 dasd_alias_remove_device(device);
4950 dasd_alias_disconnect_device_from_lcu(device);
4951
4952 return 0;
4953}
4954
4955static int dasd_eckd_restore_device(struct dasd_device *device)
4956{
4957 struct dasd_eckd_private *private;
4958 struct dasd_eckd_characteristics temp_rdc_data;
4959 int rc;
4960 struct dasd_uid temp_uid;
4961 unsigned long flags;
4962 unsigned long cqr_flags = 0;
4963
4964 private = (struct dasd_eckd_private *) device->private;
4965
4966
4967 rc = dasd_eckd_read_conf(device);
4968 if (rc) {
4969 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4970 "Read configuration data failed, rc=%d", rc);
4971 goto out_err;
4972 }
4973
4974 dasd_eckd_get_uid(device, &temp_uid);
4975
4976 rc = dasd_eckd_generate_uid(device);
4977 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
4978 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
4979 dev_err(&device->cdev->dev, "The UID of the DASD has "
4980 "changed\n");
4981 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
4982 if (rc)
4983 goto out_err;
4984
4985
4986 rc = dasd_alias_make_device_known_to_lcu(device);
4987 if (rc)
4988 goto out_err;
4989
4990 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
4991 dasd_eckd_validate_server(device, cqr_flags);
4992
4993
4994 rc = dasd_eckd_read_conf(device);
4995 if (rc) {
4996 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4997 "Read configuration data failed, rc=%d", rc);
4998 goto out_err2;
4999 }
5000
5001
5002 dasd_eckd_read_features(device);
5003
5004
5005 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
5006 &temp_rdc_data, 64);
5007 if (rc) {
5008 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5009 "Read device characteristic failed, rc=%d", rc);
5010 goto out_err2;
5011 }
5012 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5013 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
5014 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5015
5016
5017 dasd_alias_add_device(device);
5018
5019 return 0;
5020
5021out_err2:
5022 dasd_alias_disconnect_device_from_lcu(device);
5023out_err:
5024 return -1;
5025}
5026
5027static int dasd_eckd_reload_device(struct dasd_device *device)
5028{
5029 struct dasd_eckd_private *private;
5030 int rc, old_base;
5031 char print_uid[60];
5032 struct dasd_uid uid;
5033 unsigned long flags;
5034
5035 private = (struct dasd_eckd_private *) device->private;
5036
5037 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5038 old_base = private->uid.base_unit_addr;
5039 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5040
5041
5042 rc = dasd_eckd_read_conf(device);
5043 if (rc)
5044 goto out_err;
5045
5046 rc = dasd_eckd_generate_uid(device);
5047 if (rc)
5048 goto out_err;
5049
5050
5051
5052
5053 dasd_alias_update_add_device(device);
5054
5055 dasd_eckd_get_uid(device, &uid);
5056
5057 if (old_base != uid.base_unit_addr) {
5058 if (strlen(uid.vduit) > 0)
5059 snprintf(print_uid, sizeof(print_uid),
5060 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5061 uid.ssid, uid.base_unit_addr, uid.vduit);
5062 else
5063 snprintf(print_uid, sizeof(print_uid),
5064 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5065 uid.ssid, uid.base_unit_addr);
5066
5067 dev_info(&device->cdev->dev,
5068 "An Alias device was reassigned to a new base device "
5069 "with UID: %s\n", print_uid);
5070 }
5071 return 0;
5072
5073out_err:
5074 return -1;
5075}
5076
5077static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5078 struct dasd_rssd_messages *messages,
5079 __u8 lpum)
5080{
5081 struct dasd_rssd_messages *message_buf;
5082 struct dasd_psf_prssd_data *prssdp;
5083 struct dasd_eckd_private *private;
5084 struct dasd_ccw_req *cqr;
5085 struct ccw1 *ccw;
5086 int rc;
5087
5088 private = (struct dasd_eckd_private *) device->private;
5089 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5090 (sizeof(struct dasd_psf_prssd_data) +
5091 sizeof(struct dasd_rssd_messages)),
5092 device);
5093 if (IS_ERR(cqr)) {
5094 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5095 "Could not allocate read message buffer request");
5096 return PTR_ERR(cqr);
5097 }
5098
5099 cqr->lpm = lpum;
5100retry:
5101 cqr->startdev = device;
5102 cqr->memdev = device;
5103 cqr->block = NULL;
5104 cqr->expires = 10 * HZ;
5105 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5106
5107
5108
5109 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5110 cqr->retries = 256;
5111
5112
5113 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5114 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5115 prssdp->order = PSF_ORDER_PRSSD;
5116 prssdp->suborder = 0x03;
5117
5118
5119 ccw = cqr->cpaddr;
5120 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5121 ccw->count = sizeof(struct dasd_psf_prssd_data);
5122 ccw->flags |= CCW_FLAG_CC;
5123 ccw->flags |= CCW_FLAG_SLI;
5124 ccw->cda = (__u32)(addr_t) prssdp;
5125
5126
5127 message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5128 memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5129
5130 ccw++;
5131 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5132 ccw->count = sizeof(struct dasd_rssd_messages);
5133 ccw->flags |= CCW_FLAG_SLI;
5134 ccw->cda = (__u32)(addr_t) message_buf;
5135
5136 cqr->buildclk = get_tod_clock();
5137 cqr->status = DASD_CQR_FILLED;
5138 rc = dasd_sleep_on_immediatly(cqr);
5139 if (rc == 0) {
5140 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5141 message_buf = (struct dasd_rssd_messages *)
5142 (prssdp + 1);
5143 memcpy(messages, message_buf,
5144 sizeof(struct dasd_rssd_messages));
5145 } else if (cqr->lpm) {
5146
5147
5148
5149
5150
5151 cqr->lpm = 0;
5152 goto retry;
5153 } else
5154 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5155 "Reading messages failed with rc=%d\n",
5156 rc);
5157 dasd_sfree_request(cqr, cqr->memdev);
5158 return rc;
5159}
5160
5161static int dasd_eckd_query_host_access(struct dasd_device *device,
5162 struct dasd_psf_query_host_access *data)
5163{
5164 struct dasd_eckd_private *private;
5165 struct dasd_psf_query_host_access *host_access;
5166 struct dasd_psf_prssd_data *prssdp;
5167 struct dasd_ccw_req *cqr;
5168 struct ccw1 *ccw;
5169 int rc;
5170
5171 private = (struct dasd_eckd_private *) device->private;
5172
5173 if (!device->block && private->lcu->pav == HYPER_PAV)
5174 return -EOPNOTSUPP;
5175
5176
5177 if (!(private->features.feature[14] & 0x80))
5178 return -EOPNOTSUPP;
5179
5180 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5181 sizeof(struct dasd_psf_prssd_data) + 1,
5182 device);
5183 if (IS_ERR(cqr)) {
5184 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5185 "Could not allocate read message buffer request");
5186 return PTR_ERR(cqr);
5187 }
5188 host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5189 if (!host_access) {
5190 dasd_sfree_request(cqr, device);
5191 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5192 "Could not allocate host_access buffer");
5193 return -ENOMEM;
5194 }
5195 cqr->startdev = device;
5196 cqr->memdev = device;
5197 cqr->block = NULL;
5198 cqr->retries = 256;
5199 cqr->expires = 10 * HZ;
5200
5201
5202 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5203 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5204 prssdp->order = PSF_ORDER_PRSSD;
5205 prssdp->suborder = PSF_SUBORDER_QHA;
5206
5207 prssdp->lss = private->ned->ID;
5208 prssdp->volume = private->ned->unit_addr;
5209
5210
5211 ccw = cqr->cpaddr;
5212 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5213 ccw->count = sizeof(struct dasd_psf_prssd_data);
5214 ccw->flags |= CCW_FLAG_CC;
5215 ccw->flags |= CCW_FLAG_SLI;
5216 ccw->cda = (__u32)(addr_t) prssdp;
5217
5218
5219 ccw++;
5220 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5221 ccw->count = sizeof(struct dasd_psf_query_host_access);
5222 ccw->flags |= CCW_FLAG_SLI;
5223 ccw->cda = (__u32)(addr_t) host_access;
5224
5225 cqr->buildclk = get_tod_clock();
5226 cqr->status = DASD_CQR_FILLED;
5227
5228 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
5229 rc = dasd_sleep_on_interruptible(cqr);
5230 if (rc == 0) {
5231 *data = *host_access;
5232 } else {
5233 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5234 "Reading host access data failed with rc=%d\n",
5235 rc);
5236 rc = -EOPNOTSUPP;
5237 }
5238
5239 dasd_sfree_request(cqr, cqr->memdev);
5240 kfree(host_access);
5241 return rc;
5242}
5243
5244
5245
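/*
 * Return the number of path group entries that currently have the
 * device grouped, based on the Query Host Access data.
 */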
5246static int dasd_eckd_host_access_count(struct dasd_device *device)
5247{
5248 struct dasd_psf_query_host_access *access;
5249 struct dasd_ckd_path_group_entry *entry;
5250 struct dasd_ckd_host_information *info;
5251 int count = 0;
5252 int rc, i;
5253
5254 access = kzalloc(sizeof(*access), GFP_NOIO);
5255 if (!access) {
5256 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5257 "Could not allocate access buffer");
5258 return -ENOMEM;
5259 }
5260 rc = dasd_eckd_query_host_access(device, access);
5261 if (rc) {
5262 kfree(access);
5263 return rc;
5264 }
5265
5266 info = (struct dasd_ckd_host_information *)
5267 access->host_access_information;
5268 for (i = 0; i < info->entry_count; i++) {
5269 entry = (struct dasd_ckd_path_group_entry *)
5270 (info->entry + i * info->entry_size);
5271 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
5272 count++;
5273 }
5274
5275 kfree(access);
5276 return count;
5277}
5278
5279
5280
5281
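/*
 * Write the host access information to a sequence file.
 */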
5282static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
5283{
5284 struct dasd_psf_query_host_access *access;
5285 struct dasd_ckd_path_group_entry *entry;
5286 struct dasd_ckd_host_information *info;
5287 char sysplex[9] = "";
5288 int rc, i, j;
5289
5290 access = kzalloc(sizeof(*access), GFP_NOIO);
5291 if (!access) {
5292 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5293 "Could not allocate access buffer");
5294 return -ENOMEM;
5295 }
5296 rc = dasd_eckd_query_host_access(device, access);
5297 if (rc) {
5298 kfree(access);
5299 return rc;
5300 }
5301
5302 info = (struct dasd_ckd_host_information *)
5303 access->host_access_information;
5304 for (i = 0; i < info->entry_count; i++) {
5305 entry = (struct dasd_ckd_path_group_entry *)
5306 (info->entry + i * info->entry_size);
5307
5308 seq_puts(m, "pgid ");
5309 for (j = 0; j < 11; j++)
5310 seq_printf(m, "%02x", entry->pgid[j]);
5311 seq_putc(m, '\n');
5312
5313 seq_printf(m, "status_flags %02x\n", entry->status_flags);
5314
5315 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
5316 EBCASC(sysplex, sizeof(sysplex));
5317 seq_printf(m, "sysplex_name %8s\n", sysplex);
5318
5319 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
5320
5321 seq_printf(m, "timestamp %lu\n", (unsigned long)
5322 entry->timestamp);
5323 }
5324 kfree(access);
5325
5326 return 0;
5327}
5328
5329
5330
5331
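/*
 * Perform Subsystem Function - CUIR response.
 * Send the response code for a Control Unit Initiated Reconfiguration
 * request back to the storage server.
 */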
5332static int
5333dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5334 __u32 message_id, __u8 lpum)
5335{
5336 struct dasd_psf_cuir_response *psf_cuir;
5337 int pos = pathmask_to_pos(lpum);
5338 struct dasd_ccw_req *cqr;
5339 struct ccw1 *ccw;
5340 int rc;
5341
5342 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
5343 sizeof(struct dasd_psf_cuir_response),
5344 device);
5345
5346 if (IS_ERR(cqr)) {
5347 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5348 "Could not allocate PSF-CUIR request");
5349 return PTR_ERR(cqr);
5350 }
5351
5352 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
5353 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
5354 psf_cuir->cc = response;
5355 psf_cuir->chpid = device->path[pos].chpid;
5356 psf_cuir->message_id = message_id;
5357 psf_cuir->cssid = device->path[pos].cssid;
5358 psf_cuir->ssid = device->path[pos].ssid;
5359 ccw = cqr->cpaddr;
5360 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5361 ccw->cda = (__u32)(addr_t)psf_cuir;
5362 ccw->flags = CCW_FLAG_SLI;
5363 ccw->count = sizeof(struct dasd_psf_cuir_response);
5364
5365 cqr->startdev = device;
5366 cqr->memdev = device;
5367 cqr->block = NULL;
5368 cqr->retries = 256;
5369 cqr->expires = 10*HZ;
5370 cqr->buildclk = get_tod_clock();
5371 cqr->status = DASD_CQR_FILLED;
5372 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5373
5374 rc = dasd_sleep_on(cqr);
5375
5376 dasd_sfree_request(cqr, cqr->memdev);
5377 return rc;
5378}
5379
5380
5381
5382
5383
5384
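/*
 * Return the configuration data referenced by the record selector of
 * the CUIR message, or the configuration data of the path the message
 * was received on if no record selector is given.
 */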
5385static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
5386 __u8 lpum,
5387 struct dasd_cuir_message *cuir)
5388{
5389 struct dasd_conf_data *conf_data;
5390 int path, pos;
5391
5392 if (cuir->record_selector == 0)
5393 goto out;
5394 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
5395 conf_data = device->path[pos].conf_data;
5396 if (conf_data->gneq.record_selector ==
5397 cuir->record_selector)
5398 return conf_data;
5399 }
5400out:
5401 return device->path[pathmask_to_pos(lpum)].conf_data;
5402}
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
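/*
 * Determine the scope of a CUIR request by comparing the NED and NEQ
 * selection data of the request with the configuration data of each
 * path. Returns a mask of the affected paths, or the path mask of the
 * path the attention message was received on if the request does not
 * specify a scope.
 */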
5413static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
5414 struct dasd_cuir_message *cuir)
5415{
5416 struct dasd_conf_data *ref_conf_data;
5417 unsigned long bitmask = 0, mask = 0;
5418 struct dasd_conf_data *conf_data;
5419 unsigned int pos, path;
5420 char *ref_gneq, *gneq;
5421 char *ref_ned, *ned;
5422 int tbcpm = 0;
5423
5424
5425
5426 if (!cuir->ned_map ||
5427 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
5428 return lpum;
5429
5430
5431 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
5432
5433 pos = 8 - ffs(cuir->ned_map);
5434 ref_ned = (char *)&ref_conf_data->neds[pos];
5435 ref_gneq = (char *)&ref_conf_data->gneq;
5436
5437 mask = cuir->neq_map[2];
5438 mask |= cuir->neq_map[1] << 8;
5439 mask |= cuir->neq_map[0] << 16;
5440
5441 for (path = 0; path < 8; path++) {
5442
5443 bitmask = mask;
5444 conf_data = device->path[path].conf_data;
5445 pos = 8 - ffs(cuir->ned_map);
5446 ned = (char *) &conf_data->neds[pos];
5447
5448 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
5449 continue;
5450 gneq = (char *)&conf_data->gneq;
5451
5452
5453
5454 while (bitmask) {
5455 pos = ffs(bitmask) - 1;
5456 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
5457 != 0)
5458 break;
5459 clear_bit(pos, &bitmask);
5460 }
5461 if (bitmask)
5462 continue;
5463
5464
5465 tbcpm |= 0x80 >> path;
5466 }
5467 return tbcpm;
5468}
5469
5470static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
5471 unsigned long paths, int action)
5472{
5473 int pos;
5474
5475 while (paths) {
5476
5477 pos = 8 - ffs(paths);
5478
5479 if (action == CUIR_QUIESCE)
5480 pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
5481 device->path[pos].cssid,
5482 device->path[pos].chpid);
5483 else if (action == CUIR_RESUME)
5484 pr_info("Path %x.%02x is back online after service on the storage server\n",
5485 device->path[pos].cssid,
5486 device->path[pos].chpid);
5487 clear_bit(7 - pos, &paths);
5488 }
5489}
5490
5491static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
5492 struct dasd_cuir_message *cuir)
5493{
5494 unsigned long tbcpm;
5495
5496 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
5497
5498 if (!(dasd_path_get_opm(device) & tbcpm))
5499 return 0;
5500 if (!(dasd_path_get_opm(device) & ~tbcpm)) {
5501
5502
5503 return -EINVAL;
5504 }
5505
5506 dasd_path_remove_opm(device, tbcpm);
5507 dasd_path_add_cuirpm(device, tbcpm);
5508 return tbcpm;
5509}
5510
5511
5512
5513
5514
5515
5516
5517
5518
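/*
 * Walk through all devices of the LCU and build the path mask to
 * quiesce. Return an error if the last operational path of a device
 * would be removed.
 */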
5519static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
5520 struct dasd_cuir_message *cuir)
5521{
5522 struct alias_pav_group *pavgroup, *tempgroup;
5523 struct dasd_eckd_private *private;
5524 struct dasd_device *dev, *n;
5525 unsigned long paths = 0;
5526 unsigned long flags;
5527 int tbcpm;
5528
5529 private = (struct dasd_eckd_private *) device->private;
5530
5531 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
5532 alias_list) {
5533 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5534 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5535 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
5536 if (tbcpm < 0)
5537 goto out_err;
5538 paths |= tbcpm;
5539 }
5540
5541 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
5542 alias_list) {
5543 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5544 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5545 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
5546 if (tbcpm < 0)
5547 goto out_err;
5548 paths |= tbcpm;
5549 }
5550
5551 list_for_each_entry_safe(pavgroup, tempgroup,
5552 &private->lcu->grouplist, group) {
5553 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
5554 alias_list) {
5555 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5556 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5557 spin_unlock_irqrestore(
5558 get_ccwdev_lock(dev->cdev), flags);
5559 if (tbcpm < 0)
5560 goto out_err;
5561 paths |= tbcpm;
5562 }
5563 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
5564 alias_list) {
5565 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5566 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5567 spin_unlock_irqrestore(
5568 get_ccwdev_lock(dev->cdev), flags);
5569 if (tbcpm < 0)
5570 goto out_err;
5571 paths |= tbcpm;
5572 }
5573 }
5574
5575 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
5576 return 0;
5577out_err:
5578 return tbcpm;
5579}
5580
5581static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5582 struct dasd_cuir_message *cuir)
5583{
5584 struct alias_pav_group *pavgroup, *tempgroup;
5585 struct dasd_eckd_private *private;
5586 struct dasd_device *dev, *n;
5587 unsigned long paths = 0;
5588 int tbcpm;
5589
5590 private = (struct dasd_eckd_private *) device->private;
5591
5592
5593
5594
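 /*
 * The path may have been added through a generic path event before;
 * only trigger path verification if the path is not already in use.
 */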
5595 list_for_each_entry_safe(dev, n,
5596 &private->lcu->active_devices,
5597 alias_list) {
5598 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5599 paths |= tbcpm;
5600 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5601 dasd_path_add_tbvpm(dev, tbcpm);
5602 dasd_schedule_device_bh(dev);
5603 }
5604 }
5605 list_for_each_entry_safe(dev, n,
5606 &private->lcu->inactive_devices,
5607 alias_list) {
5608 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5609 paths |= tbcpm;
5610 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5611 dasd_path_add_tbvpm(dev, tbcpm);
5612 dasd_schedule_device_bh(dev);
5613 }
5614 }
5615
5616 list_for_each_entry_safe(pavgroup, tempgroup,
5617 &private->lcu->grouplist,
5618 group) {
5619 list_for_each_entry_safe(dev, n,
5620 &pavgroup->baselist,
5621 alias_list) {
5622 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5623 paths |= tbcpm;
5624 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5625 dasd_path_add_tbvpm(dev, tbcpm);
5626 dasd_schedule_device_bh(dev);
5627 }
5628 }
5629 list_for_each_entry_safe(dev, n,
5630 &pavgroup->aliaslist,
5631 alias_list) {
5632 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5633 paths |= tbcpm;
5634 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5635 dasd_path_add_tbvpm(dev, tbcpm);
5636 dasd_schedule_device_bh(dev);
5637 }
5638 }
5639 }
5640
5641 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
5642 return 0;
5643}
5644
5645static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
5646 __u8 lpum)
5647{
5648 struct dasd_cuir_message *cuir = messages;
5649 int response;
5650
5651 DBF_DEV_EVENT(DBF_WARNING, device,
5652 "CUIR request: %016llx %016llx %016llx %08x",
5653 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
5654 ((u32 *)cuir)[3]);
5655
5656 if (cuir->code == CUIR_QUIESCE) {
5657
5658 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
5659 response = PSF_CUIR_LAST_PATH;
5660 else
5661 response = PSF_CUIR_COMPLETED;
5662 } else if (cuir->code == CUIR_RESUME) {
5663
5664 dasd_eckd_cuir_resume(device, lpum, cuir);
5665 response = PSF_CUIR_COMPLETED;
5666 } else
5667 response = PSF_CUIR_NOT_SUPPORTED;
5668
5669 dasd_eckd_psf_cuir_response(device, response,
5670 cuir->message_id, lpum);
5671 DBF_DEV_EVENT(DBF_WARNING, device,
5672 "CUIR response: %d on message ID %08x", response,
5673 cuir->message_id);
5674
5675 device->discipline->check_attention(device, lpum);
5676}
5677
5678static void dasd_eckd_check_attention_work(struct work_struct *work)
5679{
5680 struct check_attention_work_data *data;
5681 struct dasd_rssd_messages *messages;
5682 struct dasd_device *device;
5683 int rc;
5684
5685 data = container_of(work, struct check_attention_work_data, worker);
5686 device = data->device;
5687 messages = kzalloc(sizeof(*messages), GFP_KERNEL);
5688 if (!messages) {
5689 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5690 "Could not allocate attention message buffer");
5691 goto out;
5692 }
5693 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
5694 if (rc)
5695 goto out;
5696 if (messages->length == ATTENTION_LENGTH_CUIR &&
5697 messages->format == ATTENTION_FORMAT_CUIR)
5698 dasd_eckd_handle_cuir(device, messages, data->lpum);
5699out:
5700 dasd_put_device(device);
5701 kfree(messages);
5702 kfree(data);
5703}
5704
5705static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
5706{
5707 struct check_attention_work_data *data;
5708
5709 data = kzalloc(sizeof(*data), GFP_ATOMIC);
5710 if (!data)
5711 return -ENOMEM;
5712 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
5713 dasd_get_device(device);
5714 data->device = device;
5715 data->lpum = lpum;
5716 schedule_work(&data->worker);
5717 return 0;
5718}
5719
5720static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
5721{
5722 if (~lpum & dasd_path_get_opm(device)) {
5723 dasd_path_add_nohpfpm(device, lpum);
5724 dasd_path_remove_opm(device, lpum);
5725 dev_err(&device->cdev->dev,
5726 "Channel path %02X lost HPF functionality and is disabled\n",
5727 lpum);
5728 return 1;
5729 }
5730 return 0;
5731}
5732
5733static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
5734{
5735 struct dasd_eckd_private *private;
5736
5737 private = (struct dasd_eckd_private *) device->private;
5738 dev_err(&device->cdev->dev,
5739 "High Performance FICON disabled\n");
5740 private->fcx_max_data = 0;
5741}
5742
5743static int dasd_eckd_hpf_enabled(struct dasd_device *device)
5744{
5745 struct dasd_eckd_private *private;
5746
5747 private = (struct dasd_eckd_private *) device->private;
5748 return private->fcx_max_data ? 1 : 0;
5749}
5750
5751static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
5752 struct irb *irb)
5753{
5754 struct dasd_eckd_private *private;
5755
5756 private = (struct dasd_eckd_private *) device->private;
5757 if (!private->fcx_max_data) {
5758
5759 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5760 "Trying to disable HPF for a non-HPF device");
5761 return;
5762 }
5763 if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
5764 dasd_eckd_disable_hpf_device(device);
5765 } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
5766 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
5767 return;
5768 dasd_eckd_disable_hpf_device(device);
5769 dasd_path_set_tbvpm(device,
5770 dasd_path_get_hpfpm(device));
5771 }
5772
5773
5774
5775
5776 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
5777 dasd_schedule_requeue(device);
5778}
5779
5780static struct ccw_driver dasd_eckd_driver = {
5781 .driver = {
5782 .name = "dasd-eckd",
5783 .owner = THIS_MODULE,
5784 },
5785 .ids = dasd_eckd_ids,
5786 .probe = dasd_eckd_probe,
5787 .remove = dasd_generic_remove,
5788 .set_offline = dasd_generic_set_offline,
5789 .set_online = dasd_eckd_set_online,
5790 .notify = dasd_generic_notify,
5791 .path_event = dasd_generic_path_event,
5792 .shutdown = dasd_generic_shutdown,
5793 .freeze = dasd_generic_pm_freeze,
5794 .thaw = dasd_generic_restore_device,
5795 .restore = dasd_generic_restore_device,
5796 .uc_handler = dasd_generic_uc_handler,
5797 .int_class = IRQIO_DAS,
5798};
5799
5800
5801
5802
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812
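/*
 * The ECKD discipline. max_blocks limits the number of blocks per
 * channel program; it is bounded by the per-device memory pool that
 * requests are built from (define extent and locate record CCWs plus
 * one CCW/IDAL pointer per block).
 */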
5813static struct dasd_discipline dasd_eckd_discipline = {
5814 .owner = THIS_MODULE,
5815 .name = "ECKD",
5816 .ebcname = "ECKD",
5817 .max_blocks = 190,
5818 .check_device = dasd_eckd_check_characteristics,
5819 .uncheck_device = dasd_eckd_uncheck_device,
5820 .do_analysis = dasd_eckd_do_analysis,
5821 .verify_path = dasd_eckd_verify_path,
5822 .basic_to_ready = dasd_eckd_basic_to_ready,
5823 .online_to_ready = dasd_eckd_online_to_ready,
5824 .basic_to_known = dasd_eckd_basic_to_known,
5825 .fill_geometry = dasd_eckd_fill_geometry,
5826 .start_IO = dasd_start_IO,
5827 .term_IO = dasd_term_IO,
5828 .handle_terminated_request = dasd_eckd_handle_terminated_request,
5829 .format_device = dasd_eckd_format_device,
5830 .check_device_format = dasd_eckd_check_device_format,
5831 .erp_action = dasd_eckd_erp_action,
5832 .erp_postaction = dasd_eckd_erp_postaction,
5833 .check_for_device_change = dasd_eckd_check_for_device_change,
5834 .build_cp = dasd_eckd_build_alias_cp,
5835 .free_cp = dasd_eckd_free_alias_cp,
5836 .dump_sense = dasd_eckd_dump_sense,
5837 .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
5838 .fill_info = dasd_eckd_fill_info,
5839 .ioctl = dasd_eckd_ioctl,
5840 .freeze = dasd_eckd_pm_freeze,
5841 .restore = dasd_eckd_restore_device,
5842 .reload = dasd_eckd_reload_device,
5843 .get_uid = dasd_eckd_get_uid,
5844 .kick_validate = dasd_eckd_kick_validate_server,
5845 .check_attention = dasd_eckd_check_attention,
5846 .host_access_count = dasd_eckd_host_access_count,
5847 .hosts_print = dasd_hosts_print,
5848 .handle_hpf_error = dasd_eckd_handle_hpf_error,
5849 .disable_hpf = dasd_eckd_disable_hpf_device,
5850 .hpf_enabled = dasd_eckd_hpf_enabled,
5851 .reset_path = dasd_eckd_reset_path,
5852};
5853
5854static int __init
5855dasd_eckd_init(void)
5856{
5857 int ret;
5858
5859 ASCEBC(dasd_eckd_discipline.ebcname, 4);
5860 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
5861 GFP_KERNEL | GFP_DMA);
5862 if (!dasd_reserve_req)
5863 return -ENOMEM;
5864 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
5865 GFP_KERNEL | GFP_DMA);
5866 if (!path_verification_worker) {
5867 kfree(dasd_reserve_req);
5868 return -ENOMEM;
5869 }
5870 ret = ccw_driver_register(&dasd_eckd_driver);
5871 if (!ret)
5872 wait_for_device_probe();
5873 else {
5874 kfree(path_verification_worker);
5875 kfree(dasd_reserve_req);
5876 }
5877 return ret;
5878}
5879
5880static void __exit
5881dasd_eckd_cleanup(void)
5882{
5883 ccw_driver_unregister(&dasd_eckd_driver);
5884 kfree(path_verification_worker);
5885 kfree(dasd_reserve_req);
5886}
5887
5888module_init(dasd_eckd_init);
5889module_exit(dasd_eckd_cleanup);
5890