1
2
3
4
5
6
7
8
9
10
11#define KMSG_COMPONENT "cio"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/spinlock.h>
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/slab.h>
20#include <linux/list.h>
21#include <linux/device.h>
22#include <linux/workqueue.h>
23#include <linux/delay.h>
24#include <linux/timer.h>
25#include <linux/kernel_stat.h>
26#include <linux/sched/signal.h>
27
28#include <asm/ccwdev.h>
29#include <asm/cio.h>
30#include <asm/param.h>
31#include <asm/cmb.h>
32#include <asm/isc.h>
33
34#include "chp.h"
35#include "cio.h"
36#include "cio_debug.h"
37#include "css.h"
38#include "device.h"
39#include "ioasm.h"
40#include "io_sch.h"
41#include "blacklist.h"
42#include "chsc.h"
43
44static struct timer_list recovery_timer;
45static DEFINE_SPINLOCK(recovery_lock);
46static int recovery_phase;
47static const unsigned long recovery_delay[] = { 3, 30, 300 };
48
49static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
50static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
51static struct bus_type ccw_bus_type;
52
53
54
55
56
57
58
59static int
60ccw_bus_match (struct device * dev, struct device_driver * drv)
61{
62 struct ccw_device *cdev = to_ccwdev(dev);
63 struct ccw_driver *cdrv = to_ccwdrv(drv);
64 const struct ccw_device_id *ids = cdrv->ids, *found;
65
66 if (!ids)
67 return 0;
68
69 found = ccw_device_id_match(ids, &cdev->id);
70 if (!found)
71 return 0;
72
73 cdev->id.driver_info = found->driver_info;
74
75 return 1;
76}
77
78
79
80
81static int snprint_alias(char *buf, size_t size,
82 struct ccw_device_id *id, const char *suffix)
83{
84 int len;
85
86 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
87 if (len > size)
88 return len;
89 buf += len;
90 size -= len;
91
92 if (id->dev_type != 0)
93 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
94 id->dev_model, suffix);
95 else
96 len += snprintf(buf, size, "dtdm%s", suffix);
97
98 return len;
99}
100
101
102
103static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
104{
105 struct ccw_device *cdev = to_ccwdev(dev);
106 struct ccw_device_id *id = &(cdev->id);
107 int ret;
108 char modalias_buf[30];
109
110
111 ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
112 if (ret)
113 return ret;
114
115
116 ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
117 if (ret)
118 return ret;
119
120
121
122 ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
123 if (ret)
124 return ret;
125
126
127 ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
128 if (ret)
129 return ret;
130
131
132 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
133 ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
134 return ret;
135}
136
137static void io_subchannel_irq(struct subchannel *);
138static int io_subchannel_probe(struct subchannel *);
139static int io_subchannel_remove(struct subchannel *);
140static void io_subchannel_shutdown(struct subchannel *);
141static int io_subchannel_sch_event(struct subchannel *, int);
142static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
143 int);
144static void recovery_func(struct timer_list *unused);
145
/* This driver handles I/O subchannels only. */
static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
150
151static int io_subchannel_prepare(struct subchannel *sch)
152{
153 struct ccw_device *cdev;
154
155
156
157
158 cdev = sch_get_cdev(sch);
159 if (cdev && !device_is_registered(&cdev->dev))
160 return -EAGAIN;
161 return 0;
162}
163
164static int io_subchannel_settle(void)
165{
166 int ret;
167
168 ret = wait_event_interruptible(ccw_device_init_wq,
169 atomic_read(&ccw_device_init_count) == 0);
170 if (ret)
171 return -EINTR;
172 flush_workqueue(cio_work_q);
173 return 0;
174}
175
/* css driver operations for I/O subchannels. */
static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
191
192int __init io_subchannel_init(void)
193{
194 int ret;
195
196 timer_setup(&recovery_timer, recovery_func, 0);
197 ret = bus_register(&ccw_bus_type);
198 if (ret)
199 return ret;
200 ret = css_driver_register(&io_subchannel_driver);
201 if (ret)
202 bus_unregister(&ccw_bus_type);
203
204 return ret;
205}
206
207
208
209
210static ssize_t
211devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
212{
213 struct ccw_device *cdev = to_ccwdev(dev);
214 struct ccw_device_id *id = &(cdev->id);
215
216 if (id->dev_type != 0)
217 return sprintf(buf, "%04x/%02x\n",
218 id->dev_type, id->dev_model);
219 else
220 return sprintf(buf, "n/a\n");
221}
222
223static ssize_t
224cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
225{
226 struct ccw_device *cdev = to_ccwdev(dev);
227 struct ccw_device_id *id = &(cdev->id);
228
229 return sprintf(buf, "%04x/%02x\n",
230 id->cu_type, id->cu_model);
231}
232
233static ssize_t
234modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
235{
236 struct ccw_device *cdev = to_ccwdev(dev);
237 struct ccw_device_id *id = &(cdev->id);
238 int len;
239
240 len = snprint_alias(buf, PAGE_SIZE, id, "\n");
241
242 return len > PAGE_SIZE ? PAGE_SIZE : len;
243}
244
245static ssize_t
246online_show (struct device *dev, struct device_attribute *attr, char *buf)
247{
248 struct ccw_device *cdev = to_ccwdev(dev);
249
250 return sprintf(buf, cdev->online ? "1\n" : "0\n");
251}
252
/* A device is an orphan when it hangs off the pseudo subchannel. */
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
257
/*
 * Remove the ccw device from the driver core and drop the reference
 * held since initialization.  Safe to call on a partially set up device.
 */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
270
271static void io_subchannel_quiesce(struct subchannel *);
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then drives the device FSM to the offline state.  The final
 * put_device() balances the reference taken by ccw_device_set_online().
 * Must be called with @cdev unlocked, from process context.
 * Returns 0 on success, a negative error code otherwise.
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached; the lock is
	 * dropped while sleeping so the FSM can make progress. */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		/* -EBUSY: quiesce the subchannel (unlocked) and retry,
		 * restoring the FSM state saved across the quiesce. */
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user about unusual end states. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
348
349
350
351
352
353
354
355
356
357
358
359
360
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev via the device FSM and then calls the
 * driver's set_online() function, rolling the device back to offline if
 * that fails.  A device reference is held while the device is online; it
 * is dropped again by ccw_device_set_offline() or on any failure here.
 * Must be called with @cdev unlocked, from process context.
 * Returns 0 on success, a negative error code otherwise.
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that the device failed to come online. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	/* Driver rejected the device: take it offline again. */
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached. */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
447
448static int online_store_handle_offline(struct ccw_device *cdev)
449{
450 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
451 spin_lock_irq(cdev->ccwlock);
452 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
453 spin_unlock_irq(cdev->ccwlock);
454 return 0;
455 }
456 if (cdev->drv && cdev->drv->set_offline)
457 return ccw_device_set_offline(cdev);
458 return -EINVAL;
459}
460
/* Re-run device recognition on a boxed device, then set it online. */
static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}
478
479static int online_store_handle_online(struct ccw_device *cdev, int force)
480{
481 int ret;
482
483 ret = online_store_recog_and_online(cdev);
484 if (ret && !force)
485 return ret;
486 if (force && cdev->private->state == DEV_STATE_BOXED) {
487 ret = ccw_device_stlck(cdev);
488 if (ret)
489 return ret;
490 if (cdev->id.cu_type == 0)
491 cdev->private->state = DEV_STATE_NOT_OPER;
492 ret = online_store_recog_and_online(cdev);
493 if (ret)
494 return ret;
495 }
496 return 0;
497}
498
/*
 * sysfs: set the device online ("1"), offline ("0") or force it online
 * ("force").  Serialized against concurrent writers via the atomic onoff
 * flag and against driver binding via the device lock.
 */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Only one online/offline transition at a time. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Reject while the FSM is still in transition. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict with pending todo work. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
548
549static ssize_t
550available_show (struct device *dev, struct device_attribute *attr, char *buf)
551{
552 struct ccw_device *cdev = to_ccwdev(dev);
553 struct subchannel *sch;
554
555 if (ccw_device_is_orphan(cdev))
556 return sprintf(buf, "no device\n");
557 switch (cdev->private->state) {
558 case DEV_STATE_BOXED:
559 return sprintf(buf, "boxed\n");
560 case DEV_STATE_DISCONNECTED:
561 case DEV_STATE_DISCONNECTED_SENSE_ID:
562 case DEV_STATE_NOT_OPER:
563 sch = to_subchannel(dev->parent);
564 if (!sch->lpm)
565 return sprintf(buf, "no path\n");
566 else
567 return sprintf(buf, "no device\n");
568 default:
569
570 return sprintf(buf, "good\n");
571 }
572}
573
574static ssize_t
575initiate_logging(struct device *dev, struct device_attribute *attr,
576 const char *buf, size_t count)
577{
578 struct subchannel *sch = to_subchannel(dev);
579 int rc;
580
581 rc = chsc_siosl(sch->schid);
582 if (rc < 0) {
583 pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
584 sch->schid.ssid, sch->schid.sch_no, rc);
585 return rc;
586 }
587 pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
588 sch->schid.ssid, sch->schid.sch_no);
589 return count;
590}
591
592static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
593 char *buf)
594{
595 struct subchannel *sch = to_subchannel(dev);
596
597 return sprintf(buf, "%02x\n", sch->vpm);
598}
599
/* ccw device attributes. */
static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
/* subchannel attributes. */
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

/* sysfs attributes attached to the I/O subchannel device. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};
617
/* sysfs attributes attached to each ccw device. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

/* Installed via cdev->dev.groups in io_subchannel_initialize_dev(). */
static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
636
637static int ccw_device_add(struct ccw_device *cdev)
638{
639 struct device *dev = &cdev->dev;
640
641 dev->bus = &ccw_bus_type;
642 return device_add(dev);
643}
644
/* bus_find_device() match helper: compare against a ccw_dev_id. */
static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
652
653
654
655
656
657
658
659
660
661
662
/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
672
673static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
674{
675 int ret;
676
677 if (device_is_registered(&cdev->dev)) {
678 device_release_driver(&cdev->dev);
679 ret = device_attach(&cdev->dev);
680 WARN_ON(ret == -ENODEV);
681 }
682}
683
/* Release callback: free the ccw device once the last reference is gone. */
static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel taken in
	 * io_subchannel_initialize_dev(). */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}
695
696static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
697{
698 struct ccw_device *cdev;
699
700 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
701 if (cdev) {
702 cdev->private = kzalloc(sizeof(struct ccw_device_private),
703 GFP_KERNEL | GFP_DMA);
704 if (cdev->private)
705 return cdev;
706 }
707 kfree(cdev);
708 return ERR_PTR(-ENOMEM);
709}
710
711static void ccw_device_todo(struct work_struct *work);
712
/*
 * Initialize a freshly allocated ccw device for subchannel @sch: set up
 * private data, driver-core fields and the sysfs name, take a reference
 * on the subchannel and link the device to it.
 * Returns 0 on success, a negative error code otherwise.
 */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	/* Hold a subchannel reference; released in ccw_device_release(). */
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}
756
/* Allocate and initialize a ccw device for @sch; ERR_PTR on failure. */
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_allocate_dev(sch);
	int rc;

	if (IS_ERR(cdev))
		return cdev;

	rc = io_subchannel_initialize_dev(sch, cdev);
	if (rc)
		return ERR_PTR(rc);

	return cdev;
}
770
771static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
772
/* Create a new ccw device for @sch and start device recognition on it. */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_create_ccwdev(sch);

	if (IS_ERR(cdev)) {
		/* No device representation: give up on this subchannel. */
		css_sch_device_unregister(sch);
		return;
	}

	io_subchannel_recog(cdev, sch);
}
787
788
789
790
/*
 * Register a recognized ccw device with the driver core, after its parent
 * subchannel has been made visible.  Also handles re-registration of an
 * already registered device (e.g. after renewed recognition) and keeps
 * the ccw_device_init_count / init waitqueue bookkeeping consistent.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/* If the subchannel is already gone, there is nothing to register
	 * against - bail out, but still adjust the init count. */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/* Device already registered: only reprobe if it lost its driver
	 * (e.g. the matching driver was loaded after a rebind attempt). */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/* Make the subchannel visible to userspace before adding the
	 * child ccw device. */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Unlink the device from the subchannel and drop the
		 * reference the subchannel held on it. */
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
852
/* Unregister the parent subchannel of @cdev, pinning it across the call. */
static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
865
866
867
868
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		/* During boot, keep devices around until css init is done. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
		/* fall through */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}
895
/* Kick off device recognition for @cdev on subchannel @sch. */
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition;
	 * balanced in io_subchannel_recog_done()/io_subchannel_register(). */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
906
/*
 * Move @cdev from its current subchannel to @sch: disable the old
 * subchannel, re-parent the device via device_move() and link it to the
 * new subchannel.  On device_move() failure the old subchannel is
 * re-enabled and the device left in place.
 * Returns 0 on success, a negative error code otherwise.
 */
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain new subchannel reference. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release new subchannel reference. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release new subchannel reference. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel and trigger its re-evaluation. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release old subchannel reference. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
969
/* Park @cdev at the pseudo subchannel (the "orphanage"). */
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
977
978static void io_subchannel_irq(struct subchannel *sch)
979{
980 struct ccw_device *cdev;
981
982 cdev = sch_get_cdev(sch);
983
984 CIO_TRACE_EVENT(6, "IRQ");
985 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
986 if (cdev)
987 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
988 else
989 inc_irq_stat(IRQIO_CIO);
990}
991
/* Reset the subchannel configuration to defaults. */
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	/* Request concurrent sense. */
	sch->config.csense = 1;
}
997
/* Derive operational/logical path masks and isc for a new subchannel. */
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	/* Logical path mask: available paths limited to operational ones. */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
1015
1016
1017
1018
1019
/*
 * css driver probe callback for I/O subchannels.  The console subchannel
 * already has an attached ccw device and only needs to be made known to
 * the driver core; all other subchannels get their configuration committed
 * and are queued for evaluation (which triggers device recognition).
 * Note that we always return 0; a failed subchannel is scheduled for
 * unregistration instead of failing the probe.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/* The console subchannel had its uevents suppressed so far;
		 * announce it now, then register its existing device. */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data (DMA-capable). */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	/* Schedule the subchannel for unregistration instead of failing. */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
1075
/* css driver remove callback: detach and unregister the ccw device. */
static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
1095
1096static void io_subchannel_verify(struct subchannel *sch)
1097{
1098 struct ccw_device *cdev;
1099
1100 cdev = sch_get_cdev(sch);
1101 if (cdev)
1102 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1103}
1104
/*
 * Terminate I/O that is currently using a path described by @mask and
 * trigger path verification afterwards.  On unrecoverable errors the
 * device is flagged not operational.
 */
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
1131
/*
 * Channel-path event handler: adjust the subchannel's path masks when a
 * channel path is varied on/off or goes online/offline, terminate I/O on
 * lost paths and trigger verification for gained ones.
 * Returns 0 on success, -ENODEV if the subchannel became invalid.
 */
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		/* This channel path is not used by the subchannel. */
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		/* Only paths that are also operational become logical. */
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}
1174
/*
 * Quiesce the subchannel: disable it and, while that reports -EBUSY,
 * cancel/halt/clear outstanding I/O until disabling succeeds.  The
 * console subchannel is left untouched.
 */
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		/* Already disabled - nothing to do. */
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	/* Tell the driver its outstanding request failed. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			/* Wait (unlocked) for the QUIESCE state to end. */
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}
1207
/* css driver shutdown callback: quiesce outstanding I/O. */
static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}
1212
1213static int device_is_disconnected(struct ccw_device *cdev)
1214{
1215 if (!cdev)
1216 return 0;
1217 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1218 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1219}
1220
/*
 * Per-device recovery step: trigger path verification for disconnected
 * devices (and online devices with unverified paths).  Sets *@data (redo
 * flag) when another recovery pass is needed.
 */
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		/* All available paths verified - nothing to do. */
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		/* fall through */
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
1249
1250static void recovery_work_func(struct work_struct *unused)
1251{
1252 int redo = 0;
1253
1254 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1255 if (redo) {
1256 spin_lock_irq(&recovery_lock);
1257 if (!timer_pending(&recovery_timer)) {
1258 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1259 recovery_phase++;
1260 mod_timer(&recovery_timer, jiffies +
1261 recovery_delay[recovery_phase] * HZ);
1262 }
1263 spin_unlock_irq(&recovery_lock);
1264 } else
1265 CIO_MSG_EVENT(3, "recovery: end\n");
1266}
1267
1268static DECLARE_WORK(recovery_work, recovery_work_func);
1269
static void recovery_func(struct timer_list *unused)
{
	/*
	 * Timer callback context is not suitable for the actual recovery
	 * work (it iterates the bus and takes locks), so defer it.
	 */
	schedule_work(&recovery_work);
}
1278
1279void ccw_device_schedule_recovery(void)
1280{
1281 unsigned long flags;
1282
1283 CIO_MSG_EVENT(3, "recovery: schedule\n");
1284 spin_lock_irqsave(&recovery_lock, flags);
1285 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1286 recovery_phase = 0;
1287 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1288 }
1289 spin_unlock_irqrestore(&recovery_lock, flags);
1290}
1291
/*
 * Per-device purge step: schedule unregistration of offline devices that
 * are on the blacklist.  Devices currently in an online/offline transition
 * (onoff set) are skipped.  Returns -EINTR if the caller was signalled.
 */
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}
1313
1314
1315
1316
1317
1318
/**
 * ccw_purge_blacklisted - unregister all blacklisted devices
 *
 * Walk the ccw bus and schedule unregistration for every device that is
 * offline and on the cio_ignore blacklist (see purge_fn()).
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
1325
/*
 * Put a device into the disconnected state and, if it was online,
 * start the path recovery retry sequence.
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	/* Cancel any pending device timer before changing state. */
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1336
/*
 * Mark a device not-operational: cancel its timer, disable the
 * underlying subchannel and set the NOT_OPER state.
 */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1347
/* Actions chosen by sch_get_action(), handled in io_subchannel_sch_event(). */
enum io_sch_action {
	IO_SCH_UNREG,		/* unregister the subchannel */
	IO_SCH_ORPH_UNREG,	/* move device to orphanage, unregister sch */
	IO_SCH_ATTACH,		/* attach a device to the subchannel */
	IO_SCH_UNREG_ATTACH,	/* unregister old device, then attach */
	IO_SCH_ORPH_ATTACH,	/* orphan old device, then attach */
	IO_SCH_REPROBE,		/* trigger device recognition again */
	IO_SCH_VERIFY,		/* trigger path verification */
	IO_SCH_DISC,		/* mark device disconnected */
	IO_SCH_NOP,		/* nothing to do */
};
1359
/* Decide how a subchannel event should be handled, based on current state. */
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* The device number changed underneath us. */
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		/* No usable path left. */
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	/* Defer to already-scheduled work to avoid racing with it. */
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
1532
1533static void ccw_device_set_int_class(struct ccw_device *cdev)
1534{
1535 struct ccw_driver *cdrv = cdev->drv;
1536
1537
1538
1539 if (cdrv->int_class != 0)
1540 cdev->private->int_class = cdrv->int_class;
1541 else
1542 cdev->private->int_class = IRQIO_CIO;
1543}
1544
1545#ifdef CONFIG_CCW_CONSOLE
/*
 * Bring the console ccw device online. Runs recognition, then sets the
 * device online and busy-waits (no interrupts available this early) for
 * the state machine to settle. Returns 0 on success, negative errno
 * otherwise; on success a device reference is held for the console.
 */
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}
1584
/*
 * Allocate and set up the console ccw device for @drv. Probes the console
 * subchannel, attaches io private data and creates the ccw device on top.
 * Returns the device or an ERR_PTR(); on error all acquired resources are
 * released again.
 */
struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv) {
		put_device(&sch->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* Undo subchannel reference and private data on failure. */
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;
}
1611
1612void __init ccw_device_destroy_console(struct ccw_device *cdev)
1613{
1614 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1615 struct io_subchannel_private *io_priv = to_io_private(sch);
1616
1617 set_io_private(sch, NULL);
1618 put_device(&sch->dev);
1619 put_device(&cdev->dev);
1620 kfree(io_priv);
1621}
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631void ccw_device_wait_idle(struct ccw_device *cdev)
1632{
1633 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1634
1635 while (1) {
1636 cio_tsch(sch);
1637 if (sch->schib.scsw.cmd.actl == 0)
1638 break;
1639 udelay_simple(100);
1640 }
1641}
1642
1643static int ccw_device_pm_restore(struct device *dev);
1644
/*
 * Re-enable the console device after hibernation by running the
 * regular PM restore path on it.
 */
int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
1650#endif
1651
1652
1653
1654
/* Match helper for driver_find_device(): compare device name to bus id. */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	return strcmp((char *)id, dev_name(dev)) == 0;
}
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1678 const char *bus_id)
1679{
1680 struct device *dev;
1681
1682 dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
1683 __ccwdev_check_busid);
1684
1685 return dev ? to_ccwdev(dev) : NULL;
1686}
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698static int
1699ccw_device_probe (struct device *dev)
1700{
1701 struct ccw_device *cdev = to_ccwdev(dev);
1702 struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1703 int ret;
1704
1705 cdev->drv = cdrv;
1706 ccw_device_set_int_class(cdev);
1707 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1708 if (ret) {
1709 cdev->drv = NULL;
1710 cdev->private->int_class = IRQIO_CIO;
1711 return ret;
1712 }
1713
1714 return 0;
1715}
1716
/*
 * Bus remove callback: run the driver's remove function, take the device
 * offline if necessary, unbind the driver and quiesce the subchannel.
 * Note the lock is dropped while waiting for the offline transition and
 * reacquired afterwards.
 */
static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}
1754
1755static void ccw_device_shutdown(struct device *dev)
1756{
1757 struct ccw_device *cdev;
1758
1759 cdev = to_ccwdev(dev);
1760 if (cdev->drv && cdev->drv->shutdown)
1761 cdev->drv->shutdown(cdev);
1762 __disable_cmf(cdev);
1763}
1764
1765static int ccw_device_pm_prepare(struct device *dev)
1766{
1767 struct ccw_device *cdev = to_ccwdev(dev);
1768
1769 if (work_pending(&cdev->private->todo_work))
1770 return -EAGAIN;
1771
1772 if (atomic_read(&cdev->private->onoff))
1773 return -EAGAIN;
1774
1775 if (cdev->online && cdev->drv && cdev->drv->prepare)
1776 return cdev->drv->prepare(cdev);
1777
1778 return 0;
1779}
1780
1781static void ccw_device_pm_complete(struct device *dev)
1782{
1783 struct ccw_device *cdev = to_ccwdev(dev);
1784
1785 if (cdev->online && cdev->drv && cdev->drv->complete)
1786 cdev->drv->complete(cdev);
1787}
1788
/*
 * PM freeze callback: let the driver freeze, deactivate channel
 * measurement if enabled and disable the subchannel.
 */
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail while device online/offline transition is taking place. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
1822
/*
 * PM thaw callback: re-enable the subchannel, reactivate channel
 * measurement if it was enabled before the freeze and notify the driver.
 */
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
1851
/*
 * Common part of the PM restore path: re-evaluate the subchannel and
 * re-run device recognition. For the console subchannel it is enough to
 * re-enable it. Sets and clears the resuming flag around the sequence.
 */
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * operational - re-evaluate the subchannel first.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}
1888
1889static int resume_handle_boxed(struct ccw_device *cdev)
1890{
1891 cdev->private->state = DEV_STATE_BOXED;
1892 if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
1893 return 0;
1894 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1895 return -ENODEV;
1896}
1897
1898static int resume_handle_disc(struct ccw_device *cdev)
1899{
1900 cdev->private->state = DEV_STATE_DISCONNECTED;
1901 if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
1902 return 0;
1903 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1904 return -ENODEV;
1905}
1906
/*
 * PM restore callback: re-run recognition via __ccw_device_pm_restore(),
 * then handle the resulting device state - re-online the device, restore
 * channel measurement and finally call the driver's restore callback.
 */
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* Check recognition results. */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* Check if the device id has changed. */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Reenable channel measurements, if needed. */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			/* Measurement failure is non-fatal for resume. */
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
1985
/* Power management operations for devices on the ccw bus. */
static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};
1993
/* The ccw bus type: matching, uevents, probe/remove and PM callbacks. */
static struct bus_type ccw_bus_type = {
	.name = "ccw",
	.match = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012int ccw_driver_register(struct ccw_driver *cdriver)
2013{
2014 struct device_driver *drv = &cdriver->driver;
2015
2016 drv->bus = &ccw_bus_type;
2017
2018 return driver_register(drv);
2019}
2020
2021
2022
2023
2024
2025
2026
/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
2031
/*
 * Workqueue function executing the todo action scheduled for a ccw
 * device via ccw_device_sched_todo(). Drops the device reference that
 * was taken when the work was queued.
 */
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow
 * path workqueue. If a higher-prioritized operation (as defined by the
 * enum cdev_todo ordering) is already pending, nothing is done. A device
 * reference is held while the work is queued.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
2102
2103
2104
2105
2106
2107
2108
2109
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the
 * channel subsystem for the device's subchannel.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
2116EXPORT_SYMBOL_GPL(ccw_device_siosl);
2117
2118EXPORT_SYMBOL(ccw_device_set_online);
2119EXPORT_SYMBOL(ccw_device_set_offline);
2120EXPORT_SYMBOL(ccw_driver_register);
2121EXPORT_SYMBOL(ccw_driver_unregister);
2122EXPORT_SYMBOL(get_ccwdev_by_busid);
2123