// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a device driver and a
 * bus driver. Of course we only have one channel bus driver for all
 * channel devices. But we still have some problems separating devices
 * from drivers (e.g. generic ide devices that we handle via the device
 * model).
 */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics).
 */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise.
 */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ },
};

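/*
 * Wait until all outstanding device recognition is finished, then drain
 * the CIO workqueue so that queued subchannel/device events are processed.
 */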
static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				       atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}

/************************ device handling **************************/

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

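/*
 * Undo device_add() for a registered ccw device and drop the reference
 * taken during initialization.
 */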
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

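/*
 * Handle writes to the "online" sysfs attribute: "0" sets the device
 * offline, "1" online, and "force" additionally tries to break the lock
 * of a boxed device via ccw_device_stlck() before setting it online.
 */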
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}

static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

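/*
 * Allocate a ccw device plus the private data and DMA area used by the
 * common I/O layer on its behalf. The device inherits the DMA mask and
 * coherent DMA mask of its parent subchannel.
 */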
static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;
	int ret;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto err_cdev;
	}
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		ret = -ENOMEM;
		goto err_priv;
	}

	cdev->dev.dma_mask = sch->dev.dma_mask;
	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
	if (ret)
		goto err_coherent_mask;

	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool) {
		ret = -ENOMEM;
		goto err_dma_pool;
	}
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area) {
		ret = -ENOMEM;
		goto err_dma_area;
	}
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(ret);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.bus = &ccw_bus_type;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = device_add(&cdev->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

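/*
 * Move a ccw device to a new parent subchannel, e.g. after its device
 * number reappeared elsewhere or when banishing it to the orphanage
 * (pseudo subchannel). The old subchannel is disabled first and is
 * re-enabled if device_move() fails.
 */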
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 here so that we bind to the device even on
 * error. This is needed so that our remove function is called on
 * unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = device_add(&cdev->dev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

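/*
 * Update the subchannel's path masks in response to a channel-path event
 * and trigger path termination or verification as needed; FCES events are
 * forwarded to the driver's path_event() callback per affected path.
 */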
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask, chpid, valid_bit;
	int path_event[8];

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_FCES_EVENT:
		/* Forward Endpoint Security event */
		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
				valid_bit >>= 1) {
			if (mask & valid_bit)
				path_event[chpid] = PE_PATH_FCES_EVENT;
			else
				path_event[chpid] = PE_NONE;
		}
		if (cdev && cdev->drv->path_event)
			cdev->drv->path_event(cdev, path_event);
		break;
	}
	return 0;
}

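/*
 * Quiesce a subchannel: terminate any running I/O (the driver's handler
 * is notified with -EIO) and repeat cancel/halt/clear until the
 * subchannel can be disabled.
 */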
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

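/*
 * Per-device step of the recovery pass: trigger path verification for
 * disconnected devices and for online devices that are missing usable
 * paths; set *redo when another, later pass is required.
 */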
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}


static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

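/*
 * Derive the io_sch_action to be taken for the current subchannel and
 * device state: unregister, move to the orphanage, attach a new device,
 * or simply reprobe/verify.
 */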
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

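/*
 * Console support: the console ccw device is brought up by busy waiting
 * (ccw_device_wait_idle) since it may be needed before interrupt-driven
 * I/O and workqueues are available.
 */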
#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay(100);
	}
}
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference from ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

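/*
 * Worker function executing the todo item scheduled for a ccw device by
 * ccw_device_sched_todo(); releases the workqueue reference when done.
 */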
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. ccw device lock must not be held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);