1
2
3
4
5
6
7
8
9
10
11#define KMSG_COMPONENT "hvc_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/types.h>
15#include <asm/ebcdic.h>
16#include <linux/ctype.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/init.h>
20#include <linux/mempool.h>
21#include <linux/moduleparam.h>
22#include <linux/tty.h>
23#include <linux/wait.h>
24#include <net/iucv/iucv.h>
25
26#include "hvc_console.h"
27
28
29
/* HVC virtual terminal "magic" offset: vterm numbers handled by this driver
 * start at HVC_IUCV_MAGIC (see hvc_iucv_get_private()) */
#define HVC_IUCV_MAGIC 0xc9e4c3e5
/* upper bound for the hvc_iucv= parameter (limited by the HVC layer) */
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
/* minimum number of pre-allocated iucv_tty_buffer elements in the mempool */
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY protocol: message version and message types */
#define MSG_VERSION 0x02 /* protocol version understood by this driver */
#define MSG_TYPE_ERROR 0x01 /* error notification (ignored, see hvc_iucv_write()) */
#define MSG_TYPE_TERMENV 0x02 /* terminal environment (ignored, see hvc_iucv_write()) */
#define MSG_TYPE_TERMIOS 0x04 /* terminal IO settings (ignored, see hvc_iucv_write()) */
#define MSG_TYPE_WINSIZE 0x08 /* terminal window size update (struct winsize payload) */
#define MSG_TYPE_DATA 0x10 /* terminal data */

/* on-the-wire layout of one IUCV TTY message (header + payload) */
struct iucv_tty_msg {
	u8 version; /* protocol version, must be MSG_VERSION */
	u8 type; /* one of the MSG_TYPE_* constants */
#define MSG_MAX_DATALEN ((u16)(~0)) /* datalen is a u16 -> max. 65535 bytes */
	u16 datalen; /* number of payload bytes in data[] */
	u8 data[]; /* payload (flexible array member) */
} __attribute__((packed));
/* total message size for a payload of 's' bytes */
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
50
/* state of the IUCV communication path of one terminal line */
enum iucv_state_t {
	IUCV_DISCONN = 0, /* no peer connected */
	IUCV_CONNECTED = 1, /* IUCV path accepted, peer connected */
	IUCV_SEVERED = 2, /* path severed / hung up; no longer usable */
};

/* state of the HVC terminal, tracked by the HVC notifier callbacks */
enum tty_state_t {
	TTY_CLOSED = 0,
	TTY_OPENED = 1,
};
61
/* per-terminal private data */
struct hvc_iucv_private {
	struct hvc_struct *hvc; /* HVC struct reference */
	u8 srv_name[8]; /* IUCV service name (EBCDIC, blank-padded) */
	unsigned char is_console; /* line is used as Linux console */
	enum iucv_state_t iucv_state; /* IUCV connection status */
	enum tty_state_t tty_state; /* TTY status */
	struct iucv_path *path; /* IUCV path pointer (NULL if disconnected) */
	spinlock_t lock; /* protects this structure */
#define SNDBUF_SIZE (PAGE_SIZE) /* send buffer size (one zeroed page) */
	void *sndbuf; /* send buffer */
	size_t sndbuf_len; /* number of buffered bytes in sndbuf */
#define QUEUE_SNDBUF_DELAY (HZ / 25) /* delay before flushing sndbuf */
	struct delayed_work sndbuf_work; /* work item: flush sndbuf via IUCV */
	wait_queue_head_t sndbuf_waitq; /* wait for send completion */
	struct list_head tty_outqueue; /* outgoing IUCV messages in flight */
	struct list_head tty_inqueue; /* incoming IUCV messages */
	struct device *dev; /* device structure (iucv bus) */
};

/* bookkeeping for one queued incoming or outgoing IUCV TTY message */
struct iucv_tty_buffer {
	struct list_head list; /* list pointer */
	struct iucv_message msg; /* IUCV message descriptor */
	size_t offset; /* read offset into mbuf->data (partial reads) */
	struct iucv_tty_msg *mbuf; /* message buffer (NULL until received) */
};
87
88
/* IUCV callback handlers (forward declarations) */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);

/* kernel parameter: number of HVC terminal devices (default: 1) */
static unsigned long hvc_iucv_devices = 1;

/* allocated hvc iucv tty lines, indexed by device id */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX (0) /* index of the console line */

/* z/VM user ID filter: array of 8-byte, blank-padded, upper-case entries */
#define MAX_VMID_FILTER (500) /* maximum number of filter entries */
static size_t hvc_iucv_filter_size; /* number of entries in the filter */
static void *hvc_iucv_filter; /* filter array, see hvc_iucv_parse_filter() */
static const char *hvc_iucv_filter_string; /* raw parameter before slab is up */
static DEFINE_RWLOCK(hvc_iucv_filter_lock); /* protects filter + filter_size */

/* kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending = hvc_iucv_path_pending,
	.path_severed = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending = hvc_iucv_msg_pending,
};
119
120
121
122
123
124
125
126
127
128struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
129{
130 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
131 return NULL;
132 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
133}
134
135
136
137
138
139
140
141
142
143
144
145
146static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
147{
148 struct iucv_tty_buffer *bufp;
149
150 bufp = mempool_alloc(hvc_iucv_mempool, flags);
151 if (!bufp)
152 return NULL;
153 memset(bufp, 0, sizeof(*bufp));
154
155 if (size > 0) {
156 bufp->msg.length = MSG_SIZE(size);
157 bufp->mbuf = kmalloc(bufp->msg.length, flags);
158 if (!bufp->mbuf) {
159 mempool_free(bufp, hvc_iucv_mempool);
160 return NULL;
161 }
162 bufp->mbuf->version = MSG_VERSION;
163 bufp->mbuf->type = MSG_TYPE_DATA;
164 bufp->mbuf->datalen = (u16) size;
165 }
166 return bufp;
167}
168
169
170
171
172
173static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
174{
175 kfree(bufp->mbuf);
176 mempool_free(bufp, hvc_iucv_mempool);
177}
178
179
180
181
182
183static void destroy_tty_buffer_list(struct list_head *list)
184{
185 struct iucv_tty_buffer *ent, *next;
186
187 list_for_each_entry_safe(ent, next, list, list) {
188 list_del(&ent->list);
189 destroy_tty_buffer(ent);
190 }
191}
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/**
 * hvc_iucv_write() - Deliver pending terminal input to the HVC layer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data
 * @count:		HVC buffer size
 * @has_more_data:	Set to non-zero if more input remains queued
 *
 * The caller must hold priv->lock.  Takes the first queued incoming IUCV
 * message, receives its data on first access, and copies terminal data to
 * @buf.  Returns the number of bytes copied, 0 if nothing is available,
 * or a negative errno: -EPIPE if the path has been severed, -ENOMEM if no
 * receive buffer could be allocated, -EIO on an IUCV receive failure.
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return 0 if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE so the HVC
	 * layer can hang up the tty */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flush data to hvc */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message data not yet received */
		/* allocate mem to store msg data; if no memory is available
		 * then postpone receiving the message */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0:		/* Successful */
			break;
		case 2:		/* No message found */
		case 9:		/* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error occurred or the received message
		 * does not match the expected message version/size */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
		    (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* copy as much as fits into the HVC buffer; if the message
		 * holds more, remember the offset and signal the caller */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;	/* keep buffer queued */
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* forward the terminal window size change to the HVC layer */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored */
	case MSG_TYPE_TERMENV:	/* ignored */
	case MSG_TYPE_TERMIOS:	/* ignored */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
311{
312 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
313 int written;
314 int has_more_data;
315
316 if (count <= 0)
317 return 0;
318
319 if (!priv)
320 return -ENODEV;
321
322 spin_lock(&priv->lock);
323 has_more_data = 0;
324 written = hvc_iucv_write(priv, buf, count, &has_more_data);
325 spin_unlock(&priv->lock);
326
327
328 if (has_more_data)
329 hvc_kick();
330
331 return written;
332}
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
351 int count)
352{
353 size_t len;
354
355 if (priv->iucv_state == IUCV_DISCONN)
356 return count;
357
358 if (priv->iucv_state == IUCV_SEVERED)
359 return -EPIPE;
360
361 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
362 if (!len)
363 return 0;
364
365 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
366 priv->sndbuf_len += len;
367
368 if (priv->iucv_state == IUCV_CONNECTED)
369 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
370
371 return len;
372}
373
374
375
376
377
378
379
380
381
382
/**
 * hvc_iucv_send() - Flush buffered terminal output to the IUCV peer.
 * @priv:	Pointer to struct hvc_iucv_private
 *
 * The caller must hold priv->lock.  Wraps the send buffer contents in a
 * MSG_TYPE_DATA message and sends it asynchronously; the tracking buffer
 * is queued on tty_outqueue and released by hvc_iucv_msg_complete().
 *
 * Returns the number of flushed bytes, 0 if the send buffer is empty,
 * -EPIPE if the path is severed, -EIO if there is no connection, or
 * -ENOMEM if no message buffer could be allocated.  Note: the send
 * buffer is reset even if the IUCV send fails (the data is dropped).
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute the
	 * total message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue before sending: msg_complete may fire as soon as the send
	 * is issued and looks the buffer up by message id */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; no completion callback will come
		 * for a failed send, so free the buffer immediately */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
422
423
424
425
426
427
428
429
430static void hvc_iucv_sndbuf_work(struct work_struct *work)
431{
432 struct hvc_iucv_private *priv;
433
434 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
435 if (!priv)
436 return;
437
438 spin_lock_bh(&priv->lock);
439 hvc_iucv_send(priv);
440 spin_unlock_bh(&priv->lock);
441}
442
443
444
445
446
447
448
449
450
451
452
453
454
455static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
456{
457 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
458 int queued;
459
460 if (count <= 0)
461 return 0;
462
463 if (!priv)
464 return -ENODEV;
465
466 spin_lock(&priv->lock);
467 queued = hvc_iucv_queue(priv, buf, count);
468 spin_unlock(&priv->lock);
469
470 return queued;
471}
472
473
474
475
476
477
478
479
480
481
482
483
484static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
485{
486 struct hvc_iucv_private *priv;
487
488 priv = hvc_iucv_get_private(id);
489 if (!priv)
490 return 0;
491
492 spin_lock_bh(&priv->lock);
493 priv->tty_state = TTY_OPENED;
494 spin_unlock_bh(&priv->lock);
495
496 return 0;
497}
498
499
500
501
502
/**
 * hvc_iucv_cleanup() - Reset a terminal line to the disconnected state.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * Frees all queued incoming and outgoing buffers, marks the tty closed
 * and the IUCV state disconnected, and discards any pending send data.
 * The caller must hold priv->lock.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}
513
514
515
516
517
518static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
519{
520 int rc;
521
522 spin_lock_bh(&priv->lock);
523 rc = list_empty(&priv->tty_outqueue);
524 spin_unlock_bh(&priv->lock);
525
526 return rc;
527}
528
529
530
531
532
533
534
535
/**
 * flush_sndbuf_sync() - Flush the send buffer and wait for completion.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * Cancels the delayed flush work, flushes the send buffer directly and,
 * if messages are still in flight, waits (up to HZ/10) until the
 * tty_outqueue has been drained by hvc_iucv_msg_complete().
 * Must be called without priv->lock held (takes it internally; also
 * cancel_delayed_work_sync() may sleep).
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything in flight? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
/**
 * hvc_iucv_hangup() - Sever the IUCV path and handle the hang-up.
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * Only an IUCV_CONNECTED line needs handling: the path pointer is taken
 * out of @priv under the lock and severed/freed after the lock has been
 * dropped.  If the tty is already closed, the structure is cleaned up
 * immediately.  The console line is cleaned up too but set back to
 * TTY_OPENED, because the console tty is never closed and a new peer
 * connection must remain possible.  For other lines hvc_kick() is
 * called; the HVC layer then observes the severed state (via -EPIPE
 * from get_chars) and the cleanup happens in the notifier callbacks.
 *
 * Locking: priv->lock is taken internally (plain spin_lock; callers in
 * process context disable bottom halves themselves, see
 * hvc_iucv_pm_freeze()).
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		/* detach the path; sever/free happens outside the lock */
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (tty can not be closed) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
/**
 * hvc_iucv_notifier_hangup() - HVC notifier: tty hangup.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the virtual terminal number of the instance.
 *
 * Flushes pending output synchronously and marks the tty closed.
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was triggered by ourselves (iucv_state is
	 *	 IUCV_SEVERED, set by the path_severed callback), the
	 *	 structure is cleaned up here.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), an
	 *	 established IUCV path is deliberately kept open — only the
	 *	 tty state changes — because the driver cannot connect back
	 *	 to the peer on its own. */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}
654
655
656
657
658
659
660
661
662
663
664
665
666
/**
 * hvc_iucv_notifier_del() - HVC notifier: last tty file descriptor closed.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the virtual terminal number of the instance.
 *
 * Flushes pending output, resets the instance to the disconnected state
 * and severs/frees an established IUCV path.  The path is detached under
 * priv->lock but severed and freed only after the lock has been dropped.
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
691
692
693
694
695
696
697
698
699static int hvc_iucv_filter_connreq(u8 ipvmid[8])
700{
701 size_t i;
702
703
704 if (!hvc_iucv_filter_size)
705 return 0;
706
707 for (i = 0; i < hvc_iucv_filter_size; i++)
708 if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
709 return 0;
710 return 1;
711}
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of the originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The @ipuser data (first 8 bytes) is matched against the EBCDIC service
 * names of the configured terminals.  A matching request is checked
 * against the z/VM user ID filter; refused or already-connected
 * terminals cause the path to be severed and freed.  Otherwise the path
 * is accepted (message limit 0xffff, IPRMDATA cleared), the terminal
 * enters IUCV_CONNECTED and any buffered output is scheduled for
 * sending.
 *
 * Returns 0 if @path belongs to a terminal managed by this driver;
 * otherwise -ENODEV so the IUCV core dispatches the path to other
 * handlers.
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	/* find the terminal whose service name matches ipuser[0..7] */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;	/* NUL-terminate for printing */
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established
	 * communication path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path: swap local/remote service names in the user data */
	memcpy(nuser_data, ipuser + 8, 8);
	memcpy(nuser_data + 8, ipuser, 8);
	path->msglim = 0xffff;			/* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;		/* no in-parameter data */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
796
797
798
799
800
801
802
803
804
805
806
807
808static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
809{
810 struct hvc_iucv_private *priv = path->private;
811
812 hvc_iucv_hangup(priv);
813}
814
815
816
817
818
819
820
821
822
823
824
825
/**
 * hvc_iucv_msg_pending() - IUCV handler: incoming message announced.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message descriptor
 *
 * Queues the incoming message descriptor on tty_inqueue for later
 * processing by hvc_iucv_write() (the data itself is received there).
 * Messages larger than the protocol maximum, messages arriving while
 * the tty is closed, and messages for which no buffer element can be
 * allocated are rejected.
 *
 * Takes priv->lock (plain spin_lock — this callback runs in the IUCV
 * callback context; process-context users of the lock disable BHs).
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages for a closed tty */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate a buffer element (descriptor only; data received later) */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up the HVC thread to fetch the data */

unlock_return:
	spin_unlock(&priv->lock);
}
861
862
863
864
865
866
867
868
869
870
871
872
873
/**
 * hvc_iucv_msg_complete() - IUCV handler: outgoing message delivered.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message descriptor
 *
 * Moves the completed message's buffer (matched by message id) off
 * tty_outqueue under the lock, wakes sleepers on sndbuf_waitq (see
 * flush_sndbuf_sync()), and frees the buffer after dropping the lock.
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	/* free the buffer outside of the lock */
	destroy_tty_buffer_list(&list_remove);
}
891
892
893
894
895
896
897
898
/**
 * hvc_iucv_pm_freeze() - Freeze PM callback.
 * @dev:	IUCV HVC terminal device
 *
 * Severs an established IUCV communication path before the system is
 * suspended.  Bottom halves are disabled around the hang-up because
 * hvc_iucv_hangup() takes priv->lock with a plain spin_lock.
 * Always returns 0.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	local_bh_disable();
	hvc_iucv_hangup(dev_get_drvdata(dev));
	local_bh_enable();

	return 0;
}
909
910
911
912
913
914
915
916
/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback.
 * @dev:	IUCV HVC terminal device (unused)
 *
 * Wakes up the HVC thread; the IUCV path was severed by
 * hvc_iucv_pm_freeze(), so the peer must re-establish the connection.
 * Always returns 0.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}
922
923
924
/* HVC operations for IUCV terminals (registered with the HVC layer) */
static struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/* suspend / resume device operations */
static struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver (provides the PM callbacks on the iucv bus) */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};
946
947
948
949
950
951
952
953
954
955
/**
 * hvc_iucv_alloc() - Allocate a new struct hvc_iucv_private instance.
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * Allocates and initializes a terminal line: private data, send buffer
 * (one zeroed page), HVC terminal, EBCDIC service name "lnxhvc<id>",
 * and the device on the iucv bus.  On success the instance is stored in
 * hvc_iucv_table[@id].
 *
 * Returns 0 on success; otherwise a negative error code.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* kick khvcd thread; instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information: service name "lnxhvc<id>",
	 * converted to EBCDIC for IUCV */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		/* release callback (kfree) frees priv->dev */
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}
1026
1027
1028
1029
/**
 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * Tears down the HVC terminal, unregisters the device (priv->dev is
 * then kfreed through its release callback) and frees the send buffer
 * and the instance itself.  Only used on the module init error path.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1037
1038
1039
1040
1041
1042static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1043{
1044 const char *nextdelim, *residual;
1045 size_t len;
1046
1047 nextdelim = strchr(filter, ',');
1048 if (nextdelim) {
1049 len = nextdelim - filter;
1050 residual = nextdelim + 1;
1051 } else {
1052 len = strlen(filter);
1053 residual = filter + len;
1054 }
1055
1056 if (len == 0)
1057 return ERR_PTR(-EINVAL);
1058
1059
1060 if (filter[len - 1] == '\n')
1061 len--;
1062
1063 if (len > 8)
1064 return ERR_PTR(-EINVAL);
1065
1066
1067 memset(dest, ' ', 8);
1068 while (len--)
1069 dest[len] = toupper(filter[len]);
1070 return residual;
1071}
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
/**
 * hvc_iucv_setup_filter() - Set up the z/VM user ID filter.
 * @val:	comma-separated list of z/VM user IDs (may end with '\n')
 *
 * Counts the comma-separated elements in @val, allocates an array of
 * 8-byte blank-padded entries, parses each element with
 * hvc_iucv_parse_filter(), and atomically replaces the active filter
 * under hvc_iucv_filter_lock.  An empty @val (or a sole newline)
 * removes the filter.
 *
 * Returns 0 on success; -EINVAL for a malformed element, -ENOSPC if
 * more than MAX_VMID_FILTER elements are specified, or -ENOMEM if the
 * array allocation fails.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		/* empty value removes the filter */
		size = 0;
		array = NULL;
		goto out_replace_filter;
	}

	/* count comma-separated elements */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);	/* 8 bytes per entry */
	if (!array)
		return -ENOMEM;

	/* parse each element into the next 8-byte slot */
	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	/* swap in the new filter under the write lock; free the old
	 * array outside of it */
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
1150{
1151 int rc;
1152
1153 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1154 return -ENODEV;
1155
1156 if (!val)
1157 return -EINVAL;
1158
1159 rc = 0;
1160 if (slab_is_available())
1161 rc = hvc_iucv_setup_filter(val);
1162 else
1163 hvc_iucv_filter_string = val;
1164 return rc;
1165}
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
/**
 * param_get_vmidfilter() - Generate the hvc_iucv_allow= parameter string.
 * @buffer:	Buffer to store the filter as a comma-separated list
 * @kp:		Kernel parameter (unused)
 *
 * Writes the current filter as a comma-separated list (trailing blanks
 * of each 8-byte entry stripped) and returns the number of characters
 * written, or -ENODEV when not running under z/VM or no terminals are
 * configured.
 *
 * NOTE(review): with MAX_VMID_FILTER (500) entries of up to 9 output
 * characters each, the result can exceed a PAGE_SIZE sysfs buffer —
 * verify the upper bound against the param_get buffer size.
 */
static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);	/* strip blank padding */
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace the last ',' */
	return rc;
}
1200
/* type-check hook for the "vmidfilter" parameter type used by core_param() */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1202
1203
1204
1205
1206static int __init hvc_iucv_init(void)
1207{
1208 int rc;
1209 unsigned int i;
1210
1211 if (!hvc_iucv_devices)
1212 return -ENODEV;
1213
1214 if (!MACHINE_IS_VM) {
1215 pr_notice("The z/VM IUCV HVC device driver cannot "
1216 "be used without z/VM\n");
1217 rc = -ENODEV;
1218 goto out_error;
1219 }
1220
1221 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1222 pr_err("%lu is not a valid value for the hvc_iucv= "
1223 "kernel parameter\n", hvc_iucv_devices);
1224 rc = -EINVAL;
1225 goto out_error;
1226 }
1227
1228
1229 rc = driver_register(&hvc_iucv_driver);
1230 if (rc)
1231 goto out_error;
1232
1233
1234 if (hvc_iucv_filter_string) {
1235 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1236 switch (rc) {
1237 case 0:
1238 break;
1239 case -ENOMEM:
1240 pr_err("Allocating memory failed with "
1241 "reason code=%d\n", 3);
1242 goto out_error;
1243 case -EINVAL:
1244 pr_err("hvc_iucv_allow= does not specify a valid "
1245 "z/VM user ID list\n");
1246 goto out_error;
1247 case -ENOSPC:
1248 pr_err("hvc_iucv_allow= specifies too many "
1249 "z/VM user IDs\n");
1250 goto out_error;
1251 default:
1252 goto out_error;
1253 }
1254 }
1255
1256 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1257 sizeof(struct iucv_tty_buffer),
1258 0, 0, NULL);
1259 if (!hvc_iucv_buffer_cache) {
1260 pr_err("Allocating memory failed with reason code=%d\n", 1);
1261 rc = -ENOMEM;
1262 goto out_error;
1263 }
1264
1265 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1266 hvc_iucv_buffer_cache);
1267 if (!hvc_iucv_mempool) {
1268 pr_err("Allocating memory failed with reason code=%d\n", 2);
1269 kmem_cache_destroy(hvc_iucv_buffer_cache);
1270 rc = -ENOMEM;
1271 goto out_error;
1272 }
1273
1274
1275
1276 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1277 if (rc) {
1278 pr_err("Registering HVC terminal device as "
1279 "Linux console failed\n");
1280 goto out_error_memory;
1281 }
1282
1283
1284 for (i = 0; i < hvc_iucv_devices; i++) {
1285 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1286 if (rc) {
1287 pr_err("Creating a new HVC terminal device "
1288 "failed with error code=%d\n", rc);
1289 goto out_error_hvc;
1290 }
1291 }
1292
1293
1294 rc = iucv_register(&hvc_iucv_handler, 0);
1295 if (rc) {
1296 pr_err("Registering IUCV handlers failed with error code=%d\n",
1297 rc);
1298 goto out_error_iucv;
1299 }
1300
1301 return 0;
1302
1303out_error_iucv:
1304 iucv_unregister(&hvc_iucv_handler, 0);
1305out_error_hvc:
1306 for (i = 0; i < hvc_iucv_devices; i++)
1307 if (hvc_iucv_table[i])
1308 hvc_iucv_destroy(hvc_iucv_table[i]);
1309out_error_memory:
1310 mempool_destroy(hvc_iucv_mempool);
1311 kmem_cache_destroy(hvc_iucv_buffer_cache);
1312out_error:
1313 if (hvc_iucv_filter)
1314 kfree(hvc_iucv_filter);
1315 hvc_iucv_devices = 0;
1316 return rc;
1317}
1318
1319
1320
1321
1322
/**
 * hvc_iucv_config() - Parse the "hvc_iucv=" kernel command line parameter.
 * @val:	Parameter value (number of HVC terminal devices)
 *
 * Only converts @val to an unsigned long here; range checking is done
 * later in hvc_iucv_init().
 */
static int __init	hvc_iucv_config(char *val)
{
	 return strict_strtoul(val, 10, &hvc_iucv_devices);
}
1327
1328
/* driver initcall, "hvc_iucv=" early setup, and the "hvc_iucv_allow="
 * core parameter (z/VM user ID filter, mode 0640) */
device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
1332