1
2
3
4
5
6
7
8
9
10
11
12#define KMSG_COMPONENT "hvc_iucv"
13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <asm/ebcdic.h>
18#include <linux/ctype.h>
19#include <linux/delay.h>
20#include <linux/device.h>
21#include <linux/init.h>
22#include <linux/mempool.h>
23#include <linux/moduleparam.h>
24#include <linux/tty.h>
25#include <linux/wait.h>
26#include <net/iucv/iucv.h>
27
28#include "hvc_console.h"
29
30
31
32#define HVC_IUCV_MAGIC 0xc9e4c3e5
33#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
34#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
35
36
37#define MSG_VERSION 0x02
38#define MSG_TYPE_ERROR 0x01
39#define MSG_TYPE_TERMENV 0x02
40#define MSG_TYPE_TERMIOS 0x04
41#define MSG_TYPE_WINSIZE 0x08
42#define MSG_TYPE_DATA 0x10
43
44struct iucv_tty_msg {
45 u8 version;
46 u8 type;
47#define MSG_MAX_DATALEN ((u16)(~0))
48 u16 datalen;
49 u8 data[];
50} __attribute__((packed));
51#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
52
53enum iucv_state_t {
54 IUCV_DISCONN = 0,
55 IUCV_CONNECTED = 1,
56 IUCV_SEVERED = 2,
57};
58
59enum tty_state_t {
60 TTY_CLOSED = 0,
61 TTY_OPENED = 1,
62};
63
64struct hvc_iucv_private {
65 struct hvc_struct *hvc;
66 u8 srv_name[8];
67 unsigned char is_console;
68 enum iucv_state_t iucv_state;
69 enum tty_state_t tty_state;
70 struct iucv_path *path;
71 spinlock_t lock;
72#define SNDBUF_SIZE (PAGE_SIZE)
73 void *sndbuf;
74 size_t sndbuf_len;
75#define QUEUE_SNDBUF_DELAY (HZ / 25)
76 struct delayed_work sndbuf_work;
77 wait_queue_head_t sndbuf_waitq;
78 struct list_head tty_outqueue;
79 struct list_head tty_inqueue;
80 struct device *dev;
81 u8 info_path[16];
82};
83
84struct iucv_tty_buffer {
85 struct list_head list;
86 struct iucv_message msg;
87 size_t offset;
88 struct iucv_tty_msg *mbuf;
89};
90
91
92static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
93static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
94static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
95static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
96
97
98
99static unsigned long hvc_iucv_devices = 1;
100
101
102static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
103#define IUCV_HVC_CON_IDX (0)
104
105#define MAX_VMID_FILTER (500)
106#define FILTER_WILDCARD_CHAR '*'
107static size_t hvc_iucv_filter_size;
108static void *hvc_iucv_filter;
109static const char *hvc_iucv_filter_string;
110static DEFINE_RWLOCK(hvc_iucv_filter_lock);
111
112
113static struct kmem_cache *hvc_iucv_buffer_cache;
114static mempool_t *hvc_iucv_mempool;
115
116
117static struct iucv_handler hvc_iucv_handler = {
118 .path_pending = hvc_iucv_path_pending,
119 .path_severed = hvc_iucv_path_severed,
120 .message_complete = hvc_iucv_msg_complete,
121 .message_pending = hvc_iucv_msg_pending,
122};
123
124
125
126
127
128
129
130
131
132static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
133{
134 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
135 return NULL;
136 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
137}
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
153{
154 struct iucv_tty_buffer *bufp;
155
156 bufp = mempool_alloc(hvc_iucv_mempool, flags);
157 if (!bufp)
158 return NULL;
159 memset(bufp, 0, sizeof(*bufp));
160
161 if (size > 0) {
162 bufp->msg.length = MSG_SIZE(size);
163 bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
164 if (!bufp->mbuf) {
165 mempool_free(bufp, hvc_iucv_mempool);
166 return NULL;
167 }
168 bufp->mbuf->version = MSG_VERSION;
169 bufp->mbuf->type = MSG_TYPE_DATA;
170 bufp->mbuf->datalen = (u16) size;
171 }
172 return bufp;
173}
174
175
176
177
178
/*
 * destroy_tty_buffer() - Free an iucv_tty_buffer element.
 * @bufp: Pointer to the buffer; must not be NULL.
 *
 * Frees the message data (kfree(NULL) is a no-op if it was never
 * allocated) and returns the list element to the mempool.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}
184
185
186
187
188
189static void destroy_tty_buffer_list(struct list_head *list)
190{
191 struct iucv_tty_buffer *ent, *next;
192
193 list_for_each_entry_safe(ent, next, list, list) {
194 list_del(&ent->list);
195 destroy_tty_buffer(ent);
196 }
197}
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * hvc_iucv_write() - Deliver pending terminal data to the HVC layer.
 * @priv:          Pointer to the hvc_iucv_private instance.
 * @buf:           HVC buffer to copy terminal data into.
 * @count:         Size of @buf.
 * @has_more_data: Output flag, set if more data remains queued afterwards.
 *
 * Receives (if necessary) and processes the first queued incoming IUCV
 * message and copies terminal data into @buf.
 *
 * Returns the number of bytes written, 0 if no data is available,
 * -EPIPE if the IUCV path has been severed, -ENOMEM or -EIO on errors.
 * Locking: the caller must hold priv->lock.
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* not yet connected: nothing to deliver */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* the path was severed: report a hung-up terminal to the HVC
	 * layer with -EPIPE */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* no pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* process the oldest queued message first */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) {
		/* Message data has not been fetched yet (the buffer was only
		 * queued in hvc_iucv_msg_pending()).  If no memory is
		 * available, leave the buffer queued and retry later. */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0:		/* successful */
			break;
		case 2:		/* IUCV: no message found */
		case 9:		/* IUCV: message purged */
			break;	/* tolerated: drop the buffer below */
		default:
			written = -EIO;
		}
		/* Drop the buffer if the receive failed or the message is
		 * malformed (wrong protocol version or inconsistent length);
		 * for rc == 2/9 "written" stays 0. */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			(rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			/* @buf was too small: keep the buffer queued and
			 * remember the consumed offset for the next call */
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* __hvc_resize() (not hvc_resize()) because we are called
		 * from the HVC get_chars path -- TODO confirm lock context */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored */
	case MSG_TYPE_TERMENV:	/* ignored */
	case MSG_TYPE_TERMIOS:	/* ignored */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
317{
318 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
319 int written;
320 int has_more_data;
321
322 if (count <= 0)
323 return 0;
324
325 if (!priv)
326 return -ENODEV;
327
328 spin_lock(&priv->lock);
329 has_more_data = 0;
330 written = hvc_iucv_write(priv, buf, count, &has_more_data);
331 spin_unlock(&priv->lock);
332
333
334 if (has_more_data)
335 hvc_kick();
336
337 return written;
338}
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
357 int count)
358{
359 size_t len;
360
361 if (priv->iucv_state == IUCV_DISCONN)
362 return count;
363
364 if (priv->iucv_state == IUCV_SEVERED)
365 return -EPIPE;
366
367 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
368 if (!len)
369 return 0;
370
371 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
372 priv->sndbuf_len += len;
373
374 if (priv->iucv_state == IUCV_CONNECTED)
375 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
376
377 return len;
378}
379
380
381
382
383
384
385
386
387
388
/*
 * hvc_iucv_send() - Send the buffered output data to the IUCV peer.
 * @priv: Pointer to the hvc_iucv_private instance.
 *
 * Copies the send buffer into a freshly allocated tty buffer, queues it on
 * tty_outqueue and sends it asynchronously; completion is handled in
 * hvc_iucv_msg_complete().  The send buffer is emptied even if the send
 * fails (the data is then dropped).
 *
 * Returns the number of bytes handed over, 0 if the buffer was empty,
 * -EPIPE if the path was severed, -EIO if not connected, or -ENOMEM.
 * Locking: the caller must hold priv->lock.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	/* nothing buffered */
	if (!priv->sndbuf_len)
		return 0;

	/* allocate a tty buffer holding a copy of the send buffer;
	 * alloc_tty_buffer() pre-sets version/type/length fields */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue before sending: hvc_iucv_msg_complete() looks the buffer up
	 * on tty_outqueue by message id */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* send failed: unqueue and free the buffer, data is lost */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
428
429
430
431
432
433
434
435
436static void hvc_iucv_sndbuf_work(struct work_struct *work)
437{
438 struct hvc_iucv_private *priv;
439
440 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
441
442 spin_lock_bh(&priv->lock);
443 hvc_iucv_send(priv);
444 spin_unlock_bh(&priv->lock);
445}
446
447
448
449
450
451
452
453
454
455
456
457
458
459static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
460{
461 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
462 int queued;
463
464 if (count <= 0)
465 return 0;
466
467 if (!priv)
468 return -ENODEV;
469
470 spin_lock(&priv->lock);
471 queued = hvc_iucv_queue(priv, buf, count);
472 spin_unlock(&priv->lock);
473
474 return queued;
475}
476
477
478
479
480
481
482
483
484
485
486
487
488static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
489{
490 struct hvc_iucv_private *priv;
491
492 priv = hvc_iucv_get_private(id);
493 if (!priv)
494 return 0;
495
496 spin_lock_bh(&priv->lock);
497 priv->tty_state = TTY_OPENED;
498 spin_unlock_bh(&priv->lock);
499
500 return 0;
501}
502
503
504
505
506
/*
 * hvc_iucv_cleanup() - Reset an IUCV HVC terminal to its initial state.
 * @priv: Pointer to the hvc_iucv_private instance.
 *
 * Drops all queued in/out buffers, marks the terminal closed and
 * disconnected, and empties the send buffer.
 * Locking: the caller must hold priv->lock.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}
517
518
519
520
521
522static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
523{
524 int rc;
525
526 spin_lock_bh(&priv->lock);
527 rc = list_empty(&priv->tty_outqueue);
528 spin_unlock_bh(&priv->lock);
529
530 return rc;
531}
532
533
534
535
536
537
538
539
/*
 * flush_sndbuf_sync() - Synchronously flush buffered output data.
 * @priv: Pointer to the hvc_iucv_private instance.
 *
 * Cancels the delayed flush work, sends the buffer contents directly and,
 * if messages are still in flight, waits (up to HZ/10) for the outqueue to
 * drain; the wait is woken from hvc_iucv_msg_complete().
 * Must not be called with priv->lock held (takes it itself).
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	sync_wait = !list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
/*
 * hvc_iucv_hangup() - Sever the IUCV connection and hang up the terminal.
 * @priv: Pointer to the hvc_iucv_private instance.
 *
 * Marks a connected terminal as severed and cleans up depending on the TTY
 * state: a closed TTY is cleaned up immediately; the console terminal is
 * cleaned up but kept "opened" so it can accept a new connection; any other
 * open TTY is left for the HVC layer to hang up (hvc_kick()).
 * The path itself is severed and freed outside priv->lock.
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console special case: reset state but re-mark the
			 * tty as opened so the console stays usable */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever the severed path (outside of priv->lock) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
635{
636 struct hvc_iucv_private *priv;
637
638 priv = hvc_iucv_get_private(id);
639 if (!priv)
640 return;
641
642 flush_sndbuf_sync(priv);
643
644 spin_lock_bh(&priv->lock);
645
646
647
648
649
650
651
652 priv->tty_state = TTY_CLOSED;
653
654 if (priv->iucv_state == IUCV_SEVERED)
655 hvc_iucv_cleanup(priv);
656 spin_unlock_bh(&priv->lock);
657}
658
659
660
661
662
663
664
665
666
667
668static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
669{
670 struct hvc_iucv_private *priv;
671 struct iucv_path *path;
672
673
674
675
676 if (raise)
677 return;
678
679 priv = hvc_iucv_get_private(hp->vtermno);
680 if (!priv)
681 return;
682
683
684
685
686 flush_sndbuf_sync(priv);
687
688 spin_lock_bh(&priv->lock);
689 path = priv->path;
690 priv->path = NULL;
691 priv->iucv_state = IUCV_DISCONN;
692 spin_unlock_bh(&priv->lock);
693
694
695
696 if (path) {
697 iucv_path_sever(path, NULL);
698 iucv_path_free(path);
699 }
700}
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
716{
717 struct hvc_iucv_private *priv;
718
719 priv = hvc_iucv_get_private(id);
720 if (!priv)
721 return;
722
723 flush_sndbuf_sync(priv);
724
725 spin_lock_bh(&priv->lock);
726 destroy_tty_buffer_list(&priv->tty_outqueue);
727 destroy_tty_buffer_list(&priv->tty_inqueue);
728 priv->tty_state = TTY_CLOSED;
729 priv->sndbuf_len = 0;
730 spin_unlock_bh(&priv->lock);
731}
732
733
734
735
736
737
738
739
740static int hvc_iucv_filter_connreq(u8 ipvmid[8])
741{
742 const char *wildcard, *filter_entry;
743 size_t i, len;
744
745
746 if (!hvc_iucv_filter_size)
747 return 0;
748
749 for (i = 0; i < hvc_iucv_filter_size; i++) {
750 filter_entry = hvc_iucv_filter + (8 * i);
751
752
753
754
755
756
757 wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
758 len = (wildcard) ? wildcard - filter_entry : 8;
759 if (0 == memcmp(ipvmid, filter_entry, len))
760 return 0;
761 }
762 return 1;
763}
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
/*
 * hvc_iucv_path_pending() - IUCV handler for a pending connection request.
 * @path:   Pointer to the pending iucv_path instance.
 * @ipvmid: Originating z/VM user ID (8 bytes).
 * @ipuser: 16 bytes of user data; the first 8 bytes carry the requested
 *          terminal (service) name in EBCDIC.
 *
 * Locates a matching terminal -- either by its service name, or, if the
 * wildcard terminal ID is requested, the first terminal that is not yet
 * connected -- applies the z/VM user ID filter, and accepts or severs the
 * path accordingly.  On accept, the peer information is recorded for the
 * "peer" sysfs attribute and a buffer flush is scheduled.
 *
 * Returns 0 (the path is either accepted or severed and freed here), or
 * -ENODEV if no terminal matches the request.
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc ";
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	/* build the EBCDIC wildcard terminal ID and check for it */
	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* Find a terminal for the request: for a wildcard request take the
	 * first disconnected one, otherwise match the service name. */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* enforce the z/VM user ID filter on the originator */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* Only ONE established path per terminal is allowed: sever this
	 * path if the terminal is no longer disconnected (it may have been
	 * connected concurrently since the unlocked scan above). */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept the path: user data with the two 8-byte halves swapped */
	memcpy(nuser_data, ipuser + 8, 8);
	memcpy(nuser_data + 8, ipuser, 8);
	path->msglim = 0xffff;		/* max. # of outstanding messages */
	path->flags &= ~IUCV_IPRMDATA;	/* do not use parameter-list data */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store peer information for the "peer" sysfs attribute */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data to the newly connected peer */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
871
872
873
874
875
876
877
878
879
880
881
882
/*
 * hvc_iucv_path_severed() - IUCV handler for a severed path.
 * @path:   Pointer to the severed iucv_path instance.
 * @ipuser: 16 bytes of user data (unused here).
 *
 * Delegates to hvc_iucv_hangup(), which tears down the connection state
 * and hangs up the underlying HVC terminal.
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}
889
890
891
892
893
894
895
896
897
898
899
900
/*
 * hvc_iucv_msg_pending() - IUCV handler for an incoming message.
 * @path: Pointer to the iucv_path instance.
 * @msg:  Pointer to the pending iucv_message.
 *
 * Queues the message descriptor on the terminal's input queue and kicks
 * the HVC layer; the message data itself is received later in
 * hvc_iucv_write().  The message is rejected if it exceeds the maximum
 * supported size, the TTY is not opened, or no buffer can be allocated.
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed the max. size of iucv_tty_msg */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if the TTY has not been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate the list element only (size 0); GFP_ATOMIC because this
	 * runs with priv->lock held */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}
	rb->msg = *msg;		/* remember the message for the deferred receive */

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();		/* wake up the HVC layer to fetch the data */

unlock_return:
	spin_unlock(&priv->lock);
}
936
937
938
939
940
941
942
943
944
945
946
947
948
/*
 * hvc_iucv_msg_complete() - IUCV handler for message completion.
 * @path: Pointer to the iucv_path instance.
 * @msg:  Pointer to the completed iucv_message.
 *
 * Moves the matching buffer off the outqueue (matched by message id),
 * wakes up a possible waiter in flush_sndbuf_sync(), and frees the buffer
 * after dropping priv->lock.
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	/* free outside the lock */
	destroy_tty_buffer_list(&list_remove);
}
966
967static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
968 struct device_attribute *attr,
969 char *buf)
970{
971 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
972 size_t len;
973
974 len = sizeof(priv->srv_name);
975 memcpy(buf, priv->srv_name, len);
976 EBCASC(buf, len);
977 buf[len++] = '\n';
978 return len;
979}
980
/*
 * hvc_iucv_dev_state_show() - sysfs "state" attribute.
 *
 * Prints "<iucv_state>:<tty_state>" (numeric values of the iucv_state_t
 * and tty_state_t enums).
 * NOTE(review): reads both fields without taking priv->lock, so the two
 * values may be momentarily inconsistent -- confirm this is acceptable.
 */
static ssize_t hvc_iucv_dev_state_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}
988
/*
 * hvc_iucv_dev_peer_show() - sysfs "peer" attribute.
 *
 * Prints "<z/VM user ID>:<terminal ID>" of the connected peer, or ":" if
 * the terminal is not connected.  The data originates from info_path,
 * which is filled in hvc_iucv_path_pending().
 */
static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	char vmid[9], ipuser[9];

	memset(vmid, 0, sizeof(vmid));
	memset(ipuser, 0, sizeof(ipuser));

	spin_lock_bh(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		memcpy(vmid, priv->info_path, 8);
		memcpy(ipuser, priv->info_path + 8, 8);
	}
	spin_unlock_bh(&priv->lock);
	/* only the terminal ID is converted from EBCDIC here;
	 * NOTE(review): confirm whether vmid needs EBCASC() as well */
	EBCASC(ipuser, 8);

	return sprintf(buf, "%s:%s\n", vmid, ipuser);
}
1009
1010
1011
/* HVC operations for IUCV-based terminals */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* IUCV HVC device attributes (mode 0640: readable by owner and group) */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
/* attribute groups assigned to each hvc_iucv device (see hvc_iucv_alloc) */
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/*
 * hvc_iucv_alloc() - Allocate and register a new IUCV HVC terminal.
 * @id:         Terminal device index (0 .. hvc_iucv_devices - 1).
 * @is_console: Non-zero if this terminal is used as the Linux console.
 *
 * Allocates the private data, a zeroed send-buffer page, the HVC terminal
 * (vterm number HVC_IUCV_MAGIC + @id) and a device on the IUCV bus with
 * the sysfs attribute groups, and stores the result in hvc_iucv_table[@id].
 *
 * Returns 0 on success, otherwise a negative errno; on failure all
 * partially allocated resources are released again.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	/* page-sized send buffer (SNDBUF_SIZE == PAGE_SIZE) */
	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate the HVC terminal device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify the HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* build the IUCV service name: "lnxhvc<id>", stored in EBCDIC */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and set up the device on the IUCV bus */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	/* the release callback frees the device structure itself (priv->dev,
	 * allocated above), NOT priv */
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		/* put_device() invokes the release callback and frees
		 * priv->dev; the common error path frees the rest */
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}
1119
1120
1121
1122
/*
 * hvc_iucv_destroy() - Destroy and free an IUCV HVC terminal instance.
 * @priv: Pointer to the hvc_iucv_private instance to tear down.
 *
 * Counterpart of hvc_iucv_alloc(); only used on the init error path
 * (__init).  priv->dev is released via device_unregister() through its
 * release callback; priv itself is freed here.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1130
1131
1132
1133
1134
1135
1136static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1137{
1138 const char *nextdelim, *residual;
1139 size_t len;
1140
1141 nextdelim = strchr(filter, ',');
1142 if (nextdelim) {
1143 len = nextdelim - filter;
1144 residual = nextdelim + 1;
1145 } else {
1146 len = strlen(filter);
1147 residual = filter + len;
1148 }
1149
1150 if (len == 0)
1151 return ERR_PTR(-EINVAL);
1152
1153
1154 if (filter[len - 1] == '\n')
1155 len--;
1156
1157
1158 if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
1159 return ERR_PTR(-EINVAL);
1160
1161 if (len > 8)
1162 return ERR_PTR(-EINVAL);
1163
1164
1165 memset(dest, ' ', 8);
1166 while (len--)
1167 dest[len] = toupper(filter[len]);
1168 return residual;
1169}
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
/*
 * hvc_iucv_setup_filter() - Set up the z/VM user ID filter from a string.
 * @val: Comma-separated list of z/VM user IDs (may end with '\n').
 *
 * Parses @val into an array of 8-byte, blank-padded, upper-case entries
 * and atomically replaces the active filter under hvc_iucv_filter_lock.
 * An empty string (or a lone "\n") clears the filter.
 *
 * Returns 0 on success; -EINVAL for a malformed entry, -ENOSPC if more
 * than MAX_VMID_FILTER entries are specified, or -ENOMEM.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	/* an empty filter string clears the filter */
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size = 0;
		array = NULL;
		goto out_replace_filter;
	}

	/* count the entries (commas + 1) to size the allocation */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kcalloc(size, 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* parse each entry into its 8-byte slot of the array */
	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	/* swap in the new filter under the writer lock; the old array is
	 * freed only after the lock is dropped */
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1248{
1249 int rc;
1250
1251 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1252 return -ENODEV;
1253
1254 if (!val)
1255 return -EINVAL;
1256
1257 rc = 0;
1258 if (slab_is_available())
1259 rc = hvc_iucv_setup_filter(val);
1260 else
1261 hvc_iucv_filter_string = val;
1262 return rc;
1263}
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
/*
 * param_get_vmidfilter() - Get the hvc_iucv_allow= kernel parameter.
 * @buffer: Buffer that receives the filter as a comma-separated list.
 * @kp:     Kernel parameter (unused).
 *
 * Returns the number of characters written to @buffer (without a
 * terminating '\0' being counted), or -ENODEV when not running under z/VM
 * or no devices are configured.
 *
 * NOTE(review): with MAX_VMID_FILTER (500) entries of up to 9 characters
 * each, the output can exceed the parameter buffer commonly provided by
 * the module/sysfs layer -- verify the buffer size at the call sites.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		/* entries are blank-padded: print up to the first blank */
		end = memchr(start, ' ', 8);
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	/* replace the trailing comma with the string terminator */
	if (rc)
		buffer[--rc] = '\0';
	return rc;
}
1298
/* The parameter has no C type to check; declare it as "void" for the
 * module parameter framework. */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/* set/get ops for the hvc_iucv_allow= kernel/module parameter */
static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
1305
1306
1307
1308
/*
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization.
 *
 * Validates the hvc_iucv= device count, applies a filter string saved by
 * early parameter parsing, sets up the buffer cache/mempool, registers the
 * first terminal as a possible Linux console, allocates all terminal
 * devices and registers the IUCV handlers.
 *
 * Returns 0 on success or a negative errno; on any failure all resources
 * allocated so far are released and hvc_iucv_devices is reset to 0.
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	/* hvc_iucv=0 disables the driver entirely */
	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			   "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
			"kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* parse a filter string saved by param_set_vmidfilter() before the
	 * slab allocator was available */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
				"reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
				"z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
				"z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	/* slab cache and mempool for the incoming message descriptors */
	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as a console candidate
	 * (must be done before allocating the hvc devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate the configured number of terminal devices; index
	 * IUCV_HVC_CON_IDX becomes the console terminal */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
				"failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handlers last, so no connection requests
	 * arrive before the terminals exist */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
			rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}
1413
1414
1415
1416
1417
/*
 * hvc_iucv_config() - Parse the hvc_iucv= kernel command line parameter.
 * @val: Parameter value (number of terminal devices, decimal).
 *
 * Returns 0 on success or a negative errno from kstrtoul().  Range
 * checking against MAX_HVC_IUCV_LINES happens later in hvc_iucv_init().
 */
static	int __init hvc_iucv_config(char *val)
{
	 return kstrtoul(val, 10, &hvc_iucv_devices);
}
1422
1423
1424device_initcall(hvc_iucv_init);
1425__setup("hvc_iucv=", hvc_iucv_config);
1426core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
1427