1
2
3
4
5
6
7
8
9
10
11#define KMSG_COMPONENT "hvc_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <asm/ebcdic.h>
17#include <linux/ctype.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/init.h>
21#include <linux/mempool.h>
22#include <linux/moduleparam.h>
23#include <linux/tty.h>
24#include <linux/wait.h>
25#include <net/iucv/iucv.h>
26
27#include "hvc_console.h"
28
29
30
/* 0xc9e4c3e5 is the EBCDIC encoding of "IUCV"; vterm numbers handed to the
 * HVC core are HVC_IUCV_MAGIC + device index (see hvc_iucv_get_private()). */
#define HVC_IUCV_MAGIC 0xc9e4c3e5
/* Maximum number of HVC terminal lines this backend provides */
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
/* Minimum number of elements kept in the iucv_tty_buffer mempool */
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)


/* Version and message-type bytes of the on-wire terminal protocol */
#define MSG_VERSION 0x02		/* iucv_tty_msg version */
#define MSG_TYPE_ERROR 0x01		/* error message */
#define MSG_TYPE_TERMENV 0x02		/* terminal environment */
#define MSG_TYPE_TERMIOS 0x04		/* terminal settings update */
#define MSG_TYPE_WINSIZE 0x08		/* terminal window size update */
#define MSG_TYPE_DATA 0x10		/* terminal data */

/* On-wire message exchanged with the IUCV peer (packed wire layout) */
struct iucv_tty_msg {
	u8 version;			/* message version (MSG_VERSION) */
	u8 type;			/* message type (MSG_TYPE_*) */
#define MSG_MAX_DATALEN ((u16)(~0))	/* maximum payload: 65535 bytes */
	u16 datalen;			/* payload length in bytes */
	u8 data[];			/* payload (flexible array member) */
} __attribute__((packed));
/* Total wire size of a message carrying an s-byte payload */
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
51
/* IUCV connection state of a terminal instance */
enum iucv_state_t {
	IUCV_DISCONN = 0,		/* no IUCV path established */
	IUCV_CONNECTED = 1,		/* IUCV path accepted and active */
	IUCV_SEVERED = 2,		/* path severed; tty must hang up */
};

/* TTY state of a terminal instance */
enum tty_state_t {
	TTY_CLOSED = 0,
	TTY_OPENED = 1,
};

/* Per-terminal private data */
struct hvc_iucv_private {
	struct hvc_struct *hvc;		/* HVC struct reference */
	u8 srv_name[8];			/* IUCV service name (EBCDIC) */
	unsigned char is_console;	/* set iff this line is the console */
	enum iucv_state_t iucv_state;	/* IUCV connection status */
	enum tty_state_t tty_state;	/* TTY status */
	struct iucv_path *path;		/* established IUCV path, or NULL */
	spinlock_t lock;		/* protects all mutable fields */
#define SNDBUF_SIZE (PAGE_SIZE)		/* send buffer size (one page) */
	void *sndbuf;			/* send buffer */
	size_t sndbuf_len;		/* bytes currently buffered */
#define QUEUE_SNDBUF_DELAY (HZ / 25)	/* delay before flushing sndbuf */
	struct delayed_work sndbuf_work; /* work: flush send buffer */
	wait_queue_head_t sndbuf_waitq;	/* waitq for send completion */
	struct list_head tty_outqueue;	/* sent, not yet completed msgs */
	struct list_head tty_inqueue;	/* pending incoming msgs */
	struct device *dev;		/* device on the iucv bus (sysfs) */
	u8 info_path[16];		/* peer vmid + service (sysfs) */
};

/* Bookkeeping element for one IUCV message (rx or tx) */
struct iucv_tty_buffer {
	struct list_head list;		/* list pointer */
	struct iucv_message msg;	/* store an IUCV message */
	size_t offset;			/* data buffer offset (partial rx) */
	struct iucv_tty_msg *mbuf;	/* buffer to store input/output data */
};
89
90
/* IUCV callback handler prototypes (registered via hvc_iucv_handler) */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: number of HVC terminal devices (default: 1) */
static unsigned long hvc_iucv_devices = 1;


/* Array of allocated terminal instances, indexed by device id */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
/* Index of the instance that serves as the Linux console */
#define IUCV_HVC_CON_IDX (0)

/* z/VM user ID filter: array of 8-byte, blank-padded, uppercase entries */
#define MAX_VMID_FILTER (500)		/* maximum number of filter entries */
static size_t hvc_iucv_filter_size;	/* number of entries in the filter */
static void *hvc_iucv_filter;		/* filter entries, 8 bytes each */
static const char *hvc_iucv_filter_string; /* raw string stashed at boot */
static DEFINE_RWLOCK(hvc_iucv_filter_lock); /* protects the filter fields */


/* kmem cache and mempool for struct iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;


/* IUCV handler: routes IUCV events to the callbacks above */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending = hvc_iucv_path_pending,
	.path_severed = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending = hvc_iucv_msg_pending,
};
121
122
123
124
125
126
127
128
129
130static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
131{
132 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
133 return NULL;
134 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
135}
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
151{
152 struct iucv_tty_buffer *bufp;
153
154 bufp = mempool_alloc(hvc_iucv_mempool, flags);
155 if (!bufp)
156 return NULL;
157 memset(bufp, 0, sizeof(*bufp));
158
159 if (size > 0) {
160 bufp->msg.length = MSG_SIZE(size);
161 bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
162 if (!bufp->mbuf) {
163 mempool_free(bufp, hvc_iucv_mempool);
164 return NULL;
165 }
166 bufp->mbuf->version = MSG_VERSION;
167 bufp->mbuf->type = MSG_TYPE_DATA;
168 bufp->mbuf->datalen = (u16) size;
169 }
170 return bufp;
171}
172
173
174
175
176
177static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
178{
179 kfree(bufp->mbuf);
180 mempool_free(bufp, hvc_iucv_mempool);
181}
182
183
184
185
186
187static void destroy_tty_buffer_list(struct list_head *list)
188{
189 struct iucv_tty_buffer *ent, *next;
190
191 list_for_each_entry_safe(ent, next, list, list) {
192 list_del(&ent->list);
193 destroy_tty_buffer(ent);
194 }
195}
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Set to 1 if there is more data to deliver.
 *
 * The caller must hold priv->lock.  Takes the first queued incoming
 * message, receives its payload (if not yet done) and copies terminal
 * data to @buf.  Returns the number of bytes written, 0 if nothing is
 * pending, or a negative error code (-EPIPE: path severed, -ENOMEM: no
 * receive buffer, -EIO: receive failed).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return 0 if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE so the hvc
	 * layer hangs up the tty device */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message payload not yet received */
		/* allocate memory to store the message data; if none is
		 * available, postpone receiving the message (-ENOMEM) */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0:		/* Successful */
			break;
		case 2:		/* No message found */
		case 9:		/* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or if the received
		 * data does not match the protocol header */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* deliver as much payload as fits; keep the buffer queued
		 * (with updated offset) if the payload did not fit */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* payload carries a struct winsize; resize the terminal */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored */
	case MSG_TYPE_TERMENV:	/* ignored */
	case MSG_TYPE_TERMIOS:	/* ignored */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
315{
316 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
317 int written;
318 int has_more_data;
319
320 if (count <= 0)
321 return 0;
322
323 if (!priv)
324 return -ENODEV;
325
326 spin_lock(&priv->lock);
327 has_more_data = 0;
328 written = hvc_iucv_write(priv, buf, count, &has_more_data);
329 spin_unlock(&priv->lock);
330
331
332 if (has_more_data)
333 hvc_kick();
334
335 return written;
336}
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
355 int count)
356{
357 size_t len;
358
359 if (priv->iucv_state == IUCV_DISCONN)
360 return count;
361
362 if (priv->iucv_state == IUCV_SEVERED)
363 return -EPIPE;
364
365 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
366 if (!len)
367 return 0;
368
369 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
370 priv->sndbuf_len += len;
371
372 if (priv->iucv_state == IUCV_CONNECTED)
373 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
374
375 return len;
376}
377
378
379
380
381
382
383
384
385
386
/**
 * hvc_iucv_send() - Flush the send buffer to the IUCV peer.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The caller must hold priv->lock.  Copies the send buffer into a newly
 * allocated tty buffer and ships it with a non-blocking IUCV send.  The
 * buffered data is consumed even if the send fails.  Returns the number of
 * bytes that were buffered, 0 if nothing was buffered, or a negative error
 * (-EPIPE: severed, -EIO: not connected, -ENOMEM: no buffer).
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store message data and to compute the
	 * total message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue before sending: hvc_iucv_msg_complete() looks the message up
	 * in tty_outqueue by its id */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* send failed: drop the message; the buffered data is
		 * discarded either way (sndbuf_len is reset below) */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
426
427
428
429
430
431
432
433
434static void hvc_iucv_sndbuf_work(struct work_struct *work)
435{
436 struct hvc_iucv_private *priv;
437
438 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
439 if (!priv)
440 return;
441
442 spin_lock_bh(&priv->lock);
443 hvc_iucv_send(priv);
444 spin_unlock_bh(&priv->lock);
445}
446
447
448
449
450
451
452
453
454
455
456
457
458
459static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
460{
461 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
462 int queued;
463
464 if (count <= 0)
465 return 0;
466
467 if (!priv)
468 return -ENODEV;
469
470 spin_lock(&priv->lock);
471 queued = hvc_iucv_queue(priv, buf, count);
472 spin_unlock(&priv->lock);
473
474 return queued;
475}
476
477
478
479
480
481
482
483
484
485
486
487
488static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
489{
490 struct hvc_iucv_private *priv;
491
492 priv = hvc_iucv_get_private(id);
493 if (!priv)
494 return 0;
495
496 spin_lock_bh(&priv->lock);
497 priv->tty_state = TTY_OPENED;
498 spin_unlock_bh(&priv->lock);
499
500 return 0;
501}
502
503
504
505
506
/**
 * hvc_iucv_cleanup() - Reset a terminal instance to its initial state.
 * @priv:	Pointer to the instance; caller must hold priv->lock.
 *
 * Drops all queued incoming/outgoing messages, resets the state machine
 * to TTY_CLOSED / IUCV_DISCONN and discards pending send data.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	/* discard any data still sitting in the send buffer */
	priv->sndbuf_len = 0;
}
517
518
519
520
521
522static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
523{
524 int rc;
525
526 spin_lock_bh(&priv->lock);
527 rc = list_empty(&priv->tty_outqueue);
528 spin_unlock_bh(&priv->lock);
529
530 return rc;
531}
532
533
534
535
536
537
538
539
/**
 * flush_sndbuf_sync() - Flush the send buffer and wait for completion.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * Cancels the pending delayed flush work, sends the buffered data
 * synchronously and then waits (up to HZ/10) until the outqueue is
 * drained; hvc_iucv_msg_complete() wakes sndbuf_waitq as messages
 * complete.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything in flight? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
/**
 * hvc_iucv_hangup() - Handle a peer-initiated disconnect.
 * @priv:	Pointer to hvc_iucv_private structure.
 *
 * If the path is currently connected:
 * - tty closed: the instance is fully cleaned up (back to IUCV_DISCONN);
 * - tty opened: the state is set to IUCV_SEVERED and the hvc thread is
 *   kicked so subsequent tty operations see -EPIPE and hang up the tty;
 * - console instance: cleaned up immediately, but its tty state is
 *   restored to TTY_OPENED (the console has no regular open/close cycle).
 * The saved IUCV path is severed and freed after releasing priv->lock.
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see function comment) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* sever path outside of priv->lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device
 * @id:	Device index (originally passed to hvc_alloc).
 *
 * Called by the HVC layer when the tty is hung up.  The IUCV path is NOT
 * severed here: only the tty state is reset, so an established connection
 * survives a tty hangup — unless the peer had already severed the path
 * (IUCV_SEVERED), in which case the instance is cleaned up now.
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was triggered by our own path_severed callback
	 *	 (iucv_state == IUCV_SEVERED), clean up the instance and set
	 *	 TTY_CLOSED.  If the tty was hung up otherwise (e.g.
	 *	 vhangup()), only mark the tty closed and keep the IUCV path
	 *	 open — we cannot reconnect to the peer on our own. */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}
658
659
660
661
662
663
664
665
666
667
/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS.
 * @hp:		Pointer to the HVC device
 * @raise:	Non-zero to raise, zero to lower DTR/RTS.
 *
 * Lowering DTR/RTS severs the underlying IUCV path after flushing any
 * buffered output; raising is a no-op (connections can be established at
 * any time).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* raising DTR/RTS is ignored: IUCV connections can be established
	 * at any time */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* lowering DTR/RTS: send remaining buffered data first so nothing
	 * already accepted is lost */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to the IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* sever the path outside of priv->lock; errors from the sever are
	 * ignored, there is no way to handle them here */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
716{
717 struct hvc_iucv_private *priv;
718
719 priv = hvc_iucv_get_private(id);
720 if (!priv)
721 return;
722
723 flush_sndbuf_sync(priv);
724
725 spin_lock_bh(&priv->lock);
726 destroy_tty_buffer_list(&priv->tty_outqueue);
727 destroy_tty_buffer_list(&priv->tty_inqueue);
728 priv->tty_state = TTY_CLOSED;
729 priv->sndbuf_len = 0;
730 spin_unlock_bh(&priv->lock);
731}
732
733
734
735
736
737
738
739
740static int hvc_iucv_filter_connreq(u8 ipvmid[8])
741{
742 size_t i;
743
744
745 if (!hvc_iucv_filter_size)
746 return 0;
747
748 for (i = 0; i < hvc_iucv_filter_size; i++)
749 if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
750 return 0;
751 return 1;
752}
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
/**
 * hvc_iucv_path_pending() - IUCV handler for a pending connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM user ID of the originator (8 bytes)
 * @ipuser:	User-specified data: requested service name (bytes 0-7)
 *		and originator service name (bytes 8-15)
 *
 * Accepts the request if the originator passes the z/VM user ID filter and
 * a matching terminal is available: either the terminal whose srv_name
 * equals the requested service name, or — when the wildcard service name
 * is requested — the first disconnected terminal.  Otherwise the path is
 * severed.  Returns 0 (path handled) or -ENODEV if no terminal matches.
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";	/* NOTE(review): must be exactly 8
					 * chars ("lnxhvc" + two blanks) to
					 * match srv_name ("lnxhvc%-2d");
					 * original spacing may have been
					 * mangled — confirm two blanks. */
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* find either the terminal with the requested service name, or —
	 * for the wildcard — the first free (disconnected) terminal */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* enforce the hvc_iucv_allow= z/VM user ID filter */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, remove the
	 * pending IUCV path — a connection may have been established between
	 * the (unlocked) lookup above and taking priv->lock. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept the path: answer with the service names swapped */
	memcpy(nuser_data, ipuser + 8, 8);	/* remote service name */
	memcpy(nuser_data + 8, ipuser, 8);	/* local service name */
	path->msglim = 0xffff;			/* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;		/* no PRMDATA messages */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information for the "peer" sysfs attribute */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data to the freshly connected peer */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
860
861
862
863
864
865
866
867
868
869
870
871
/**
 * hvc_iucv_path_severed() - IUCV handler: the peer severed the path.
 * @path:	Severed path (struct iucv_path)
 * @ipuser:	User-specified data for this path (not used here)
 *
 * Delegates to hvc_iucv_hangup() which tears down the connection state.
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}
878
879
880
881
882
883
884
885
886
887
888
889
/**
 * hvc_iucv_msg_pending() - IUCV handler for an incoming message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * Only bookkeeping happens here: the message descriptor is queued on
 * tty_inqueue (without receiving the payload) and the hvc thread is
 * kicked; the payload is received later in hvc_iucv_write().  Messages
 * are rejected if oversized, if the tty is closed, or on -ENOMEM.
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed the maximum protocol size */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if the tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate a tty buffer to store the iucv message only (size 0:
	 * no payload buffer yet) */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up the hvc thread to fetch the data */

unlock_return:
	spin_unlock(&priv->lock);
}
925
926
927
928
929
930
931
932
933
934
935
936
937
/**
 * hvc_iucv_msg_complete() - IUCV handler for a completed outgoing message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the completed IUCV message
 *
 * Moves the matching entry off tty_outqueue (matched by message id),
 * wakes any waiter in flush_sndbuf_sync(), and frees the buffer after
 * dropping priv->lock.
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	/* destroy outside the lock: destroy_tty_buffer() may sleep-free */
	destroy_tty_buffer_list(&list_remove);
}
955
956
957
958
959
960
961
962
/**
 * hvc_iucv_pm_freeze() - Freeze PM callback.
 * @dev:	IUVC HVC terminal device.
 *
 * Severs an established IUCV connection before suspending; BHs are
 * disabled because hvc_iucv_hangup() uses non-_bh spinlock variants.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}
973
974
975
976
977
978
979
980
/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback.
 * @dev:	IUVC HVC terminal device.
 *
 * Wakes up the hvc thread so pending terminal updates are processed.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}
986
987static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
988 struct device_attribute *attr,
989 char *buf)
990{
991 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
992 size_t len;
993
994 len = sizeof(priv->srv_name);
995 memcpy(buf, priv->srv_name, len);
996 EBCASC(buf, len);
997 buf[len++] = '\n';
998 return len;
999}
1000
1001static ssize_t hvc_iucv_dev_state_show(struct device *dev,
1002 struct device_attribute *attr,
1003 char *buf)
1004{
1005 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1006 return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
1007}
1008
1009static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
1010 struct device_attribute *attr,
1011 char *buf)
1012{
1013 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1014 char vmid[9], ipuser[9];
1015
1016 memset(vmid, 0, sizeof(vmid));
1017 memset(ipuser, 0, sizeof(ipuser));
1018
1019 spin_lock_bh(&priv->lock);
1020 if (priv->iucv_state == IUCV_CONNECTED) {
1021 memcpy(vmid, priv->info_path, 8);
1022 memcpy(ipuser, priv->info_path + 8, 8);
1023 }
1024 spin_unlock_bh(&priv->lock);
1025 EBCASC(ipuser, 8);
1026
1027 return sprintf(buf, "%s:%s\n", vmid, ipuser);
1028}
1029
1030
1031
/* HVC operations for IUCV-based terminal devices */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze = hvc_iucv_pm_freeze,
	.thaw = hvc_iucv_pm_restore_thaw,
	.restore = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver (bound to devices created in hvc_iucv_alloc()) */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus = &iucv_bus,
	.pm = &hvc_iucv_pm_ops,
};

/* IUCV HVC device attributes (root-readable, group-readable; see shows) */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1084{
1085 struct hvc_iucv_private *priv;
1086 char name[9];
1087 int rc;
1088
1089 priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1090 if (!priv)
1091 return -ENOMEM;
1092
1093 spin_lock_init(&priv->lock);
1094 INIT_LIST_HEAD(&priv->tty_outqueue);
1095 INIT_LIST_HEAD(&priv->tty_inqueue);
1096 INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1097 init_waitqueue_head(&priv->sndbuf_waitq);
1098
1099 priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1100 if (!priv->sndbuf) {
1101 kfree(priv);
1102 return -ENOMEM;
1103 }
1104
1105
1106 priv->is_console = is_console;
1107
1108
1109 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
1110 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
1111 if (IS_ERR(priv->hvc)) {
1112 rc = PTR_ERR(priv->hvc);
1113 goto out_error_hvc;
1114 }
1115
1116
1117 priv->hvc->irq_requested = 1;
1118
1119
1120 snprintf(name, 9, "lnxhvc%-2d", id);
1121 memcpy(priv->srv_name, name, 8);
1122 ASCEBC(priv->srv_name, 8);
1123
1124
1125 priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1126 if (!priv->dev) {
1127 rc = -ENOMEM;
1128 goto out_error_dev;
1129 }
1130 dev_set_name(priv->dev, "hvc_iucv%d", id);
1131 dev_set_drvdata(priv->dev, priv);
1132 priv->dev->bus = &iucv_bus;
1133 priv->dev->parent = iucv_root;
1134 priv->dev->driver = &hvc_iucv_driver;
1135 priv->dev->groups = hvc_iucv_dev_attr_groups;
1136 priv->dev->release = (void (*)(struct device *)) kfree;
1137 rc = device_register(priv->dev);
1138 if (rc) {
1139 put_device(priv->dev);
1140 goto out_error_dev;
1141 }
1142
1143 hvc_iucv_table[id] = priv;
1144 return 0;
1145
1146out_error_dev:
1147 hvc_remove(priv->hvc);
1148out_error_hvc:
1149 free_page((unsigned long) priv->sndbuf);
1150 kfree(priv);
1151
1152 return rc;
1153}
1154
1155
1156
1157
/**
 * hvc_iucv_destroy() - Tear down an hvc_iucv_private instance.
 * @priv:	Pointer to the instance (allocated by hvc_iucv_alloc()).
 *
 * Unregisters the HVC terminal and the iucv-bus device (whose release
 * callback frees priv->dev), then frees the send buffer and the instance.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1165
1166
1167
1168
1169
1170static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1171{
1172 const char *nextdelim, *residual;
1173 size_t len;
1174
1175 nextdelim = strchr(filter, ',');
1176 if (nextdelim) {
1177 len = nextdelim - filter;
1178 residual = nextdelim + 1;
1179 } else {
1180 len = strlen(filter);
1181 residual = filter + len;
1182 }
1183
1184 if (len == 0)
1185 return ERR_PTR(-EINVAL);
1186
1187
1188 if (filter[len - 1] == '\n')
1189 len--;
1190
1191 if (len > 8)
1192 return ERR_PTR(-EINVAL);
1193
1194
1195 memset(dest, ' ', 8);
1196 while (len--)
1197 dest[len] = toupper(filter[len]);
1198 return residual;
1199}
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211static int hvc_iucv_setup_filter(const char *val)
1212{
1213 const char *residual;
1214 int err;
1215 size_t size, count;
1216 void *array, *old_filter;
1217
1218 count = strlen(val);
1219 if (count == 0 || (count == 1 && val[0] == '\n')) {
1220 size = 0;
1221 array = NULL;
1222 goto out_replace_filter;
1223 }
1224
1225
1226 size = 1;
1227 residual = val;
1228 while ((residual = strchr(residual, ',')) != NULL) {
1229 residual++;
1230 size++;
1231 }
1232
1233
1234 if (size > MAX_VMID_FILTER)
1235 return -ENOSPC;
1236
1237 array = kzalloc(size * 8, GFP_KERNEL);
1238 if (!array)
1239 return -ENOMEM;
1240
1241 count = size;
1242 residual = val;
1243 while (*residual && count) {
1244 residual = hvc_iucv_parse_filter(residual,
1245 array + ((size - count) * 8));
1246 if (IS_ERR(residual)) {
1247 err = PTR_ERR(residual);
1248 kfree(array);
1249 goto out_err;
1250 }
1251 count--;
1252 }
1253
1254out_replace_filter:
1255 write_lock_bh(&hvc_iucv_filter_lock);
1256 old_filter = hvc_iucv_filter;
1257 hvc_iucv_filter_size = size;
1258 hvc_iucv_filter = array;
1259 write_unlock_bh(&hvc_iucv_filter_lock);
1260 kfree(old_filter);
1261
1262 err = 0;
1263out_err:
1264 return err;
1265}
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
/**
 * param_set_vmidfilter() - Set the hvc_iucv_allow= kernel parameter.
 * @val:	Parameter value (comma-separated list of z/VM user IDs).
 * @kp:		Kernel parameter structure (unused).
 *
 * If the slab allocator is already available, the filter is built
 * immediately; during early boot the string is only stashed in
 * hvc_iucv_filter_string and parsed later by hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer to hvc_iucv_init() */
	return rc;
}
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
/**
 * param_get_vmidfilter() - Get the hvc_iucv_allow= kernel parameter.
 * @buffer:	Buffer to store the filter as a comma-separated string.
 * @kp:		Kernel parameter structure (unused).
 *
 * Writes the current filter entries (blanks stripped) to @buffer and
 * returns the number of characters written.
 *
 * NOTE(review): @buffer is filled without an explicit bound; with
 * MAX_VMID_FILTER (500) entries of up to 9 characters each the output can
 * exceed a 4 KiB parameter buffer — verify against the moduleparam
 * buffer size.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end = memchr(start, ' ', 8);	/* strip blank padding */
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace trailing comma */
	return rc;
}
1328
/* Tell moduleparam that the "vmidfilter" parameter type is a void pointer */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/* get/set operations for the hvc_iucv_allow= parameter */
static struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
1335
1336
1337
1338
1339static int __init hvc_iucv_init(void)
1340{
1341 int rc;
1342 unsigned int i;
1343
1344 if (!hvc_iucv_devices)
1345 return -ENODEV;
1346
1347 if (!MACHINE_IS_VM) {
1348 pr_notice("The z/VM IUCV HVC device driver cannot "
1349 "be used without z/VM\n");
1350 rc = -ENODEV;
1351 goto out_error;
1352 }
1353
1354 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1355 pr_err("%lu is not a valid value for the hvc_iucv= "
1356 "kernel parameter\n", hvc_iucv_devices);
1357 rc = -EINVAL;
1358 goto out_error;
1359 }
1360
1361
1362 rc = driver_register(&hvc_iucv_driver);
1363 if (rc)
1364 goto out_error;
1365
1366
1367 if (hvc_iucv_filter_string) {
1368 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1369 switch (rc) {
1370 case 0:
1371 break;
1372 case -ENOMEM:
1373 pr_err("Allocating memory failed with "
1374 "reason code=%d\n", 3);
1375 goto out_error;
1376 case -EINVAL:
1377 pr_err("hvc_iucv_allow= does not specify a valid "
1378 "z/VM user ID list\n");
1379 goto out_error;
1380 case -ENOSPC:
1381 pr_err("hvc_iucv_allow= specifies too many "
1382 "z/VM user IDs\n");
1383 goto out_error;
1384 default:
1385 goto out_error;
1386 }
1387 }
1388
1389 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1390 sizeof(struct iucv_tty_buffer),
1391 0, 0, NULL);
1392 if (!hvc_iucv_buffer_cache) {
1393 pr_err("Allocating memory failed with reason code=%d\n", 1);
1394 rc = -ENOMEM;
1395 goto out_error;
1396 }
1397
1398 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1399 hvc_iucv_buffer_cache);
1400 if (!hvc_iucv_mempool) {
1401 pr_err("Allocating memory failed with reason code=%d\n", 2);
1402 kmem_cache_destroy(hvc_iucv_buffer_cache);
1403 rc = -ENOMEM;
1404 goto out_error;
1405 }
1406
1407
1408
1409 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1410 if (rc) {
1411 pr_err("Registering HVC terminal device as "
1412 "Linux console failed\n");
1413 goto out_error_memory;
1414 }
1415
1416
1417 for (i = 0; i < hvc_iucv_devices; i++) {
1418 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1419 if (rc) {
1420 pr_err("Creating a new HVC terminal device "
1421 "failed with error code=%d\n", rc);
1422 goto out_error_hvc;
1423 }
1424 }
1425
1426
1427 rc = iucv_register(&hvc_iucv_handler, 0);
1428 if (rc) {
1429 pr_err("Registering IUCV handlers failed with error code=%d\n",
1430 rc);
1431 goto out_error_hvc;
1432 }
1433
1434 return 0;
1435
1436out_error_hvc:
1437 for (i = 0; i < hvc_iucv_devices; i++)
1438 if (hvc_iucv_table[i])
1439 hvc_iucv_destroy(hvc_iucv_table[i]);
1440out_error_memory:
1441 mempool_destroy(hvc_iucv_mempool);
1442 kmem_cache_destroy(hvc_iucv_buffer_cache);
1443out_error:
1444 kfree(hvc_iucv_filter);
1445 hvc_iucv_devices = 0;
1446 return rc;
1447}
1448
1449
1450
1451
1452
/**
 * hvc_iucv_config() - Parse the hvc_iucv= early kernel parameter.
 * @val:	Parameter value (number of terminal devices, base 10).
 *
 * The value is range-checked later in hvc_iucv_init().
 */
static int __init hvc_iucv_config(char *val)
{
	return kstrtoul(val, 10, &hvc_iucv_devices);
}
1457
1458
/* Register the backend at device_initcall() time */
device_initcall(hvc_iucv_init);
/* hvc_iucv= early parameter: number of terminal devices */
__setup("hvc_iucv=", hvc_iucv_config);
/* hvc_iucv_allow= parameter: z/VM user ID connection filter */
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
1462