1
2
3
4
5
6
7
8#define KMSG_COMPONENT "dasd-eckd"
9
10#include <linux/list.h>
11#include <linux/slab.h>
12#include <asm/ebcdic.h>
13#include "dasd_int.h"
14#include "dasd_eckd.h"
15
16#ifdef PRINTK_HEADER
17#undef PRINTK_HEADER
18#endif
19#define PRINTK_HEADER "dasd(eckd):"
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43static void summary_unit_check_handling_work(struct work_struct *);
44static void lcu_update_work(struct work_struct *);
45static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
46
/*
 * Global root of the alias tree: a list with one entry per storage
 * server, each server holding its LCUs.  Protected by aliastree.lock.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
51
52static struct alias_server *_find_server(struct dasd_uid *uid)
53{
54 struct alias_server *pos;
55 list_for_each_entry(pos, &aliastree.serverlist, server) {
56 if (!strncmp(pos->uid.vendor, uid->vendor,
57 sizeof(uid->vendor))
58 && !strncmp(pos->uid.serial, uid->serial,
59 sizeof(uid->serial)))
60 return pos;
61 };
62 return NULL;
63}
64
65static struct alias_lcu *_find_lcu(struct alias_server *server,
66 struct dasd_uid *uid)
67{
68 struct alias_lcu *pos;
69 list_for_each_entry(pos, &server->lculist, lcu) {
70 if (pos->uid.ssid == uid->ssid)
71 return pos;
72 };
73 return NULL;
74}
75
76static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
77 struct dasd_uid *uid)
78{
79 struct alias_pav_group *pos;
80 __u8 search_unit_addr;
81
82
83 if (lcu->pav == HYPER_PAV) {
84 if (list_empty(&lcu->grouplist))
85 return NULL;
86 else
87 return list_first_entry(&lcu->grouplist,
88 struct alias_pav_group, group);
89 }
90
91
92 if (uid->type == UA_BASE_DEVICE)
93 search_unit_addr = uid->real_unit_addr;
94 else
95 search_unit_addr = uid->base_unit_addr;
96 list_for_each_entry(pos, &lcu->grouplist, group) {
97 if (pos->uid.base_unit_addr == search_unit_addr &&
98 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
99 return pos;
100 };
101 return NULL;
102}
103
104static struct alias_server *_allocate_server(struct dasd_uid *uid)
105{
106 struct alias_server *server;
107
108 server = kzalloc(sizeof(*server), GFP_KERNEL);
109 if (!server)
110 return ERR_PTR(-ENOMEM);
111 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
112 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
113 INIT_LIST_HEAD(&server->server);
114 INIT_LIST_HEAD(&server->lculist);
115 return server;
116}
117
/* Release a server structure allocated by _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
122
/*
 * Allocate and initialize an alias_lcu.  The DMA-capable buffers for
 * the unit address configuration data and for the "reset summary unit
 * check" channel program (cqr, one ccw, 16 data bytes) are
 * preallocated here, so the summary unit check handler never has to
 * allocate memory.  Returns ERR_PTR(-ENOMEM) on any allocation
 * failure, with all partial allocations released.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	/* 16 bytes of data for the RSCK ccw (see reset_summary_unit_check) */
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a new lcu always needs its unit address data read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
169
/* Free an lcu and the preallocated buffers hanging off it. */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
178
179
180
181
182
183
184
185
/*
 * Called when an ECKD device is attached: look up (or create) the
 * server and lcu structures matching the device's uid and put the
 * device on the lcu's inactive list.  Allocations use GFP_KERNEL, so
 * the aliastree lock is dropped around them and the lookup repeated
 * afterwards to handle a concurrent insertion.
 * Must not be called with any relevant lock held.
 * Returns 0 on success or a negative errno from the allocations.
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/* drop the lock for the sleeping allocation ... */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		/* ... and re-check for a concurrent insertion */
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone else was faster */
			_free_server(newserver);
		}
	}

	/*
	 * NOTE(review): while aliastree.lock is dropped for _allocate_lcu
	 * below, 'server' could in principle be freed by a concurrent
	 * disconnect of the last device -- confirm callers serialize
	 * attach/detach of devices on the same server.
	 */
	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone else was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	/* new devices start out inactive until the next lcu update */
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
239
240
241
242
243
244
/*
 * Called when an ECKD device is removed: detach the device from its
 * lcu, make sure neither worker still references it, and free the lcu
 * and server structures when they become empty.
 * Must not be called with any relevant lock held.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if the device was never connected to an lcu */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure the SUC worker no longer uses this device */
	if (device == lcu->suc_data.device) {
		/*
		 * Drop the lock while cancelling: the worker takes
		 * lcu->lock itself.  Re-check afterwards because the
		 * worker may have completed and cleared the field.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same drop/cancel/re-check pattern for the update worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* last device gone: dispose of the lcu */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* a cancelled update must be rescheduled on another device */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		/* last lcu gone: dispose of the server as well */
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
303
304
305
306
307
308
309
310static int _add_device_to_lcu(struct alias_lcu *lcu,
311 struct dasd_device *device,
312 struct dasd_device *pos)
313{
314
315 struct dasd_eckd_private *private;
316 struct alias_pav_group *group;
317 struct dasd_uid uid;
318
319 private = (struct dasd_eckd_private *) device->private;
320
321 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
322 private->uid.base_unit_addr =
323 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
324 uid = private->uid;
325
326
327 if (lcu->pav == NO_PAV) {
328 list_move(&device->alias_list, &lcu->active_devices);
329 return 0;
330 }
331
332 group = _find_group(lcu, &uid);
333 if (!group) {
334 group = kzalloc(sizeof(*group), GFP_ATOMIC);
335 if (!group)
336 return -ENOMEM;
337 memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
338 memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
339 group->uid.ssid = uid.ssid;
340 if (uid.type == UA_BASE_DEVICE)
341 group->uid.base_unit_addr = uid.real_unit_addr;
342 else
343 group->uid.base_unit_addr = uid.base_unit_addr;
344 memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
345 INIT_LIST_HEAD(&group->group);
346 INIT_LIST_HEAD(&group->baselist);
347 INIT_LIST_HEAD(&group->aliaslist);
348 list_add(&group->group, &lcu->grouplist);
349 }
350 if (uid.type == UA_BASE_DEVICE)
351 list_move(&device->alias_list, &group->baselist);
352 else
353 list_move(&device->alias_list, &group->aliaslist);
354 private->pavgroup = group;
355 return 0;
356};
357
358static void _remove_device_from_lcu(struct alias_lcu *lcu,
359 struct dasd_device *device)
360{
361 struct dasd_eckd_private *private;
362 struct alias_pav_group *group;
363
364 private = (struct dasd_eckd_private *) device->private;
365 list_move(&device->alias_list, &lcu->inactive_devices);
366 group = private->pavgroup;
367 if (!group)
368 return;
369 private->pavgroup = NULL;
370 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
371 list_del(&group->group);
372 kfree(group);
373 return;
374 }
375 if (group->next == device)
376 group->next = NULL;
377};
378
379static int
380suborder_not_supported(struct dasd_ccw_req *cqr)
381{
382 char *sense;
383 char reason;
384 char msg_format;
385 char msg_no;
386
387 sense = dasd_get_sense(&cqr->irb);
388 if (!sense)
389 return 0;
390
391 reason = sense[0];
392 msg_format = (sense[7] & 0xF0);
393 msg_no = (sense[7] & 0x0F);
394
395
396 if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
397 return 1;
398
399 return 0;
400}
401
402
403
404
405
/*
 * Try to take the ccwdev lock of every device on the lcu except @pos
 * (whose lock the caller may already hold).  Returns NULL when all
 * locks were acquired, or the first device whose lock could not be
 * taken; the caller must then release the already-taken locks via
 * _unlock_all_devices_on_lcu(lcu, pos, failed) and retry.
 * The iteration order here must stay identical to the order in
 * _unlock_all_devices_on_lcu for that partial unlock to be correct.
 * Must be called with lcu->lock held.
 */
static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
						       struct dasd_device *pos)

{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		if (device == pos)
			continue;
		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
			return device;
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		if (device == pos)
			continue;
		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
			return device;
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			if (device == pos)
				continue;
			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
				return device;
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			if (device == pos)
				continue;
			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
				return device;
		}
	}
	return NULL;
}
441
442
443
444
445
/*
 * Release the ccwdev locks taken by _trylock_all_devices_on_lcu,
 * skipping @pos, and stopping (without unlocking) when @enddev is
 * reached -- enddev is the device whose trylock failed, so its lock
 * was never taken.  Pass enddev == NULL to unlock every device.
 * Walks the lists in exactly the same order as the trylock function
 * so that precisely the previously acquired locks are released.
 * Must be called with lcu->lock held.
 */
static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
				       struct dasd_device *pos,
				       struct dasd_device *enddev)

{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		if (device == pos)
			continue;
		if (device == enddev)
			return;
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		if (device == pos)
			continue;
		if (device == enddev)
			return;
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			if (device == pos)
				continue;
			if (device == enddev)
				return;
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			if (device == pos)
				continue;
			if (device == enddev)
				return;
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
	}
}
485
486
487
488
489
490
491
492
/*
 * Take lcu->lock (irqsave) plus the ccwdev locks of all devices on
 * the lcu except @pos.  Uses trylock with a full back-off/retry cycle
 * to avoid deadlocks against code paths that take a ccwdev lock
 * before the lcu lock.  On return all locks are held and *flags holds
 * the saved interrupt state for the later spin_unlock_irqrestore.
 */
static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
					  struct dasd_device *pos,
					  unsigned long *flags)
{
	struct dasd_device *failed;

	do {
		spin_lock_irqsave(&lcu->lock, *flags);
		failed = _trylock_all_devices_on_lcu(lcu, pos);
		if (failed) {
			/* back off completely before trying again */
			_unlock_all_devices_on_lcu(lcu, pos, failed);
			spin_unlock_irqrestore(&lcu->lock, *flags);
			cpu_relax();
		}
	} while (failed);
}
509
/*
 * Same back-off/retry locking as _trylock_and_lock_lcu_irqsave, but
 * without saving the interrupt state -- for callers that already run
 * with interrupts disabled.
 */
static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
				  struct dasd_device *pos)
{
	struct dasd_device *failed;

	do {
		spin_lock(&lcu->lock);
		failed = _trylock_all_devices_on_lcu(lcu, pos);
		if (failed) {
			/* back off completely before trying again */
			_unlock_all_devices_on_lcu(lcu, pos, failed);
			spin_unlock(&lcu->lock);
			cpu_relax();
		}
	} while (failed);
}
525
526static int read_unit_address_configuration(struct dasd_device *device,
527 struct alias_lcu *lcu)
528{
529 struct dasd_psf_prssd_data *prssdp;
530 struct dasd_ccw_req *cqr;
531 struct ccw1 *ccw;
532 int rc;
533 unsigned long flags;
534
535 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 + 1 ,
536 (sizeof(struct dasd_psf_prssd_data)),
537 device);
538 if (IS_ERR(cqr))
539 return PTR_ERR(cqr);
540 cqr->startdev = device;
541 cqr->memdev = device;
542 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
543 cqr->retries = 10;
544 cqr->expires = 20 * HZ;
545
546
547 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
548 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
549 prssdp->order = PSF_ORDER_PRSSD;
550 prssdp->suborder = 0x0e;
551
552
553 ccw = cqr->cpaddr;
554 ccw->cmd_code = DASD_ECKD_CCW_PSF;
555 ccw->count = sizeof(struct dasd_psf_prssd_data);
556 ccw->flags |= CCW_FLAG_CC;
557 ccw->cda = (__u32)(addr_t) prssdp;
558
559
560 memset(lcu->uac, 0, sizeof(*(lcu->uac)));
561
562 ccw++;
563 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
564 ccw->count = sizeof(*(lcu->uac));
565 ccw->cda = (__u32)(addr_t) lcu->uac;
566
567 cqr->buildclk = get_tod_clock();
568 cqr->status = DASD_CQR_FILLED;
569
570
571 spin_lock_irqsave(&lcu->lock, flags);
572 lcu->flags &= ~NEED_UAC_UPDATE;
573 spin_unlock_irqrestore(&lcu->lock, flags);
574
575 do {
576 rc = dasd_sleep_on(cqr);
577 if (rc && suborder_not_supported(cqr))
578 return -EOPNOTSUPP;
579 } while (rc && (cqr->retries > 0));
580 if (rc) {
581 spin_lock_irqsave(&lcu->lock, flags);
582 lcu->flags |= NEED_UAC_UPDATE;
583 spin_unlock_irqrestore(&lcu->lock, flags);
584 }
585 dasd_kfree_request(cqr, cqr->memdev);
586 return rc;
587}
588
/*
 * Re-read the unit address configuration and rebuild the PAV group
 * structures from it: all devices are first flattened onto the active
 * list, the channel program is run without locks held, then the lcu's
 * PAV mode is derived from the per-unit ua types and every device is
 * sorted back into its group.
 * Called from the update worker with no locks held.
 * Returns 0 on success or the error from the channel program.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* dissolve all groups; every device becomes "active" for now */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* the channel program sleeps; no locks may be held here */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	/* need all device locks: _add_device_to_lcu updates uid data */
	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
	/* the first PAV-capable unit found determines the lcu's mode */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
643
/*
 * Delayed-work handler performing the actual lcu update.  On failure
 * (other than -EOPNOTSUPP) or when another update was requested while
 * we were processing (NEED_UAC_UPDATE set again), the work re-arms
 * itself with a 30 second delay; otherwise the pending state is
 * cleared so a new update may be scheduled.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check the flags again: a summary unit check or a new
	 * device may have set NEED_UAC_UPDATE while we were still
	 * processing the data.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		/* done: allow a new update to be scheduled */
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
672
/*
 * Mark the lcu as needing a unit address configuration update and
 * schedule the update worker, choosing a device for the channel
 * program with this preference: the given @device (if still
 * connected), then any device of the first PAV group, then any active
 * device.  Devices on the inactive list are never used.
 * Returns 0 when the update is scheduled or already running,
 * -EINVAL when no suitable device exists (the update stays pending).
 * Must be called with lcu->lock held.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running; it will see the flag */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * If we haven't found a proper device yet, give up for now; the
	 * next device that is set active will trigger an lcu update.
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
713
/*
 * Activate a device on its lcu: sort it into its PAV group (or the
 * plain active list).  If the group data is stale (UPDATE_PENDING) or
 * the sort fails, the device is parked on the active list and an lcu
 * update is scheduled instead.
 * Takes the device's ccwdev lock and the lcu lock.
 * Returns 0 or the error from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	spin_lock(&lcu->lock);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		/* park on the active list until the update sorts us in */
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
739
740int dasd_alias_update_add_device(struct dasd_device *device)
741{
742 struct dasd_eckd_private *private;
743 private = (struct dasd_eckd_private *) device->private;
744 private->lcu->flags |= UPDATE_PENDING;
745 return dasd_alias_add_device(device);
746}
747
748int dasd_alias_remove_device(struct dasd_device *device)
749{
750 struct dasd_eckd_private *private;
751 struct alias_lcu *lcu;
752 unsigned long flags;
753
754 private = (struct dasd_eckd_private *) device->private;
755 lcu = private->lcu;
756
757 if (!lcu)
758 return 0;
759 spin_lock_irqsave(&lcu->lock, flags);
760 _remove_device_from_lcu(lcu, device);
761 spin_unlock_irqrestore(&lcu->lock, flags);
762 return 0;
763}
764
/*
 * Pick an alias device of base_device's PAV group to start the next
 * request on, using round-robin over the group's alias list.
 * Returns NULL when no usable alias exists (no group/lcu, no PAV,
 * stale alias data, missing prefix support, alias not less loaded
 * than the base, alias stopped or going offline) -- the caller then
 * uses the base device itself.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{

	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV needs the prefix command on this device.
		 * NOTE(review): inferred from the message below -- confirm
		 * the meaning of feature byte 8 bit 0 against the ECKD
		 * feature-code documentation.
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin pointer, wrapping at the list end;
	 * list_first_entry on a member's list head yields the *next*
	 * entry in the list (same offset arithmetic) */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* use the alias only if it carries fewer requests than the base
	 * (presumably 'count' is the open request count -- verify) and
	 * is neither stopped nor going offline */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
819
820
821
822
823
/*
 * Send the Reset Summary Unit Check (RSCK) channel command with the
 * given reason code to the lcu, using the cqr preallocated in
 * _allocate_lcu -- no memory is allocated on this path.  Sleeps until
 * the request completes.
 * Returns the rc of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	/* first data byte carries the reason code from the sense data */
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* many retries; no ERP is used for this cqr */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
854
855static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
856{
857 struct alias_pav_group *pavgroup;
858 struct dasd_device *device;
859 struct dasd_eckd_private *private;
860
861
862 list_for_each_entry(device, &lcu->active_devices, alias_list) {
863 private = (struct dasd_eckd_private *) device->private;
864 if (private->uid.type != UA_BASE_DEVICE)
865 continue;
866 dasd_schedule_block_bh(device->block);
867 dasd_schedule_device_bh(device);
868 }
869 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
870 private = (struct dasd_eckd_private *) device->private;
871 if (private->uid.type != UA_BASE_DEVICE)
872 continue;
873 dasd_schedule_block_bh(device->block);
874 dasd_schedule_device_bh(device);
875 }
876 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
877 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
878 dasd_schedule_block_bh(device->block);
879 dasd_schedule_device_bh(device);
880 }
881 }
882}
883
/*
 * Flush the request queues of all alias devices on the lcu and move
 * them to the inactive state (on the active list with pavgroup
 * cleared), so that the following lcu update re-sorts them.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Collect all alias devices (from the active list and from every
	 * PAV group) on a private list first.  dasd_flush_device_queue
	 * sleeps, so the lcu lock is dropped around each flush; the
	 * head-of-list re-check below detects whether the device was
	 * moved off our private list concurrently (e.g. by device
	 * removal) while the lock was released.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move the device if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
935
936static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
937{
938 struct alias_pav_group *pavgroup;
939 struct dasd_device *device;
940
941 list_for_each_entry(device, &lcu->active_devices, alias_list)
942 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
943 list_for_each_entry(device, &lcu->inactive_devices, alias_list)
944 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
945 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
946 list_for_each_entry(device, &pavgroup->baselist, alias_list)
947 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
948 list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
949 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
950 }
951}
952
953static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
954{
955 struct alias_pav_group *pavgroup;
956 struct dasd_device *device;
957
958 list_for_each_entry(device, &lcu->active_devices, alias_list)
959 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
960 list_for_each_entry(device, &lcu->inactive_devices, alias_list)
961 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
962 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
963 list_for_each_entry(device, &pavgroup->baselist, alias_list)
964 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
965 list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
966 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
967 }
968}
969
/*
 * Worker scheduled by dasd_alias_handle_summary_unit_check.  By the
 * time it runs, all devices on the lcu are stopped (DASD_STOPPED_SU).
 * It flushes the alias devices, lifts the stop bits on the triggering
 * device alone so the RSCK request can run, resets the summary unit
 * check, then unstops and restarts everything and schedules the lcu
 * update.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	/* 3. unstop everything and trigger the re-read of alias data */
	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;	/* allow the next SUC to be handled */
	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
1001
1002
1003
1004
/*
 * Entry point for a summary unit check: extract the reason code from
 * the sense data, stop all devices on the lcu and hand the actual
 * handling off to summary_unit_check_handling_work.
 * NOTE(review): @device is skipped in the trylock loop, which implies
 * the caller already holds its ccwdev lock with interrupts disabled --
 * confirm against the interrupt handler that calls this.
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
					  struct irb *irb)
{
	struct alias_lcu *lcu;
	char reason;
	struct dasd_eckd_private *private;
	char *sense;

	private = (struct dasd_eckd_private *) device->private;

	sense = dasd_get_sense(irb);
	if (sense) {
		reason = sense[8];	/* reason code is in sense byte 8 */
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			    "eckd handle summary unit check: reason", reason);
	} else {
		/* without a reason code there is nothing we can reset */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "eckd handle summary unit check:"
			    " no reason code available");
		return;
	}

	lcu = private->lcu;
	if (!lcu) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device not ready to handle summary"
			      " unit check (no lcu structure)");
		return;
	}
	_trylock_and_lock_lcu(lcu, device);
	/*
	 * If this device is about to be removed just return and wait
	 * for the next interrupt on a different device.
	 */
	if (list_empty(&device->alias_list)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device is in offline processing,"
			      " don't do summary unit check handling");
		_unlock_all_devices_on_lcu(lcu, device, NULL);
		spin_unlock(&lcu->lock);
		return;
	}
	if (lcu->suc_data.device) {
		/* the worker is already scheduled or running */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "previous instance of summary unit check worker"
			      " still pending");
		_unlock_all_devices_on_lcu(lcu, device, NULL);
		spin_unlock(&lcu->lock);
		return ;
	}
	_stop_all_devices_on_lcu(lcu);
	/* prepare for lcu_update: alias data is now stale */
	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	_unlock_all_devices_on_lcu(lcu, device, NULL);
	spin_unlock(&lcu->lock);
	schedule_work(&lcu->suc_data.worker);
};
1064