// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

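/*
 * Iterate over all subchannel ids in all subchannel sets, calling fn for
 * each id. A nonzero return value from fn ends the scan of the current
 * subchannel set.
 */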
int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

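/* Callback context for for_each_subchannel_staged(). */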
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

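/*
 * Release function for the subchannel device. Clear the interruption
 * parameter in the subchannel configuration before freeing, so that no
 * stale interruption parameter remains associated with the subchannel.
 */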
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

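/*
 * Allocate and initialize a struct subchannel for the given subchannel id,
 * including its locks and DMA masks. Returns an ERR_PTR on failure.
 */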
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);

	/*
	 * The physical addresses of some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch->lock);
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

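/* Derive ssd info from the path mask and CHPIDs found in the PMCW. */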
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
	     pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);

	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices in this css.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

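/*
 * Slow path evaluation: subchannels that still need to be (re-)evaluated
 * are collected in slow_subchannel_set and processed from the delayed
 * slow_path_work item; css_eval_wq signals when the set has drained.
 */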
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while.  */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

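/* Helpers to thin out the candidate set for css_schedule_eval_cond(). */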
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct ccw_device *cdev = sch_get_cdev(sch);

	if (cdev && cdev->online)
		idset_sch_del(set, sch->schid);

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find subchannels requiring evaluation. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_UNREG:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

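/* Build the global path group id from CPU id and TOD clock information. */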
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

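/*
 * Allocate and register one channel subsystem instance, including its
 * "defunct" pseudo subchannel.
 */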
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

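/* Disable channel measurement before the system reboots. */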
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

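/*
 * Create a gen_pool backed by DMA-coherent pages: nr_pages pages are
 * allocated up front; the smallest allocation unit is 8 bytes (order 3).
 */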
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

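/*
 * Allocate from the pool, growing it by another DMA-coherent chunk
 * whenever the current chunks are exhausted.
 */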
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
				chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

/*
 * Now that the driver core is running, we can setup the channel subsystem.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success means MSS is active. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

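/*
 * A css driver matches a subchannel by its type, unless the user has set
 * driver_override to pin the subchannel to one specific driver.
 */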
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly intended for drivers which want to use the generic css bus
 * methods (register/unregister). Don't use it directly, but use one of the
 * wrappers.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is the counterpart of css_driver_register().
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);