1
2
3
4
5
6
7
8
9
10
11#define KMSG_COMPONENT "hvc_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <asm/ebcdic.h>
17#include <linux/ctype.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/init.h>
21#include <linux/mempool.h>
22#include <linux/moduleparam.h>
23#include <linux/tty.h>
24#include <linux/wait.h>
25#include <net/iucv/iucv.h>
26
27#include "hvc_console.h"
28
29
30
/* General device driver and IUCV protocol constants */
#define HVC_IUCV_MAGIC		0xc9e4c3e5	/* "IUCV" in EBCDIC */
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message version and type constants (on-the-wire protocol) */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */
42
/* On-the-wire message exchanged over the IUCV path (packed layout). */
struct iucv_tty_msg {
	u8	version;		/* Message version (MSG_VERSION) */
	u8	type;			/* Message type (one of MSG_TYPE_*) */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length in bytes */
	u8	data[];			/* Payload (flexible array member) */
} __attribute__((packed));
/* Total wire size of a message carrying s bytes of payload */
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
51
/* State of the IUCV connection of a terminal device */
enum iucv_state_t {
	IUCV_DISCONN	= 0,	/* no IUCV path established */
	IUCV_CONNECTED	= 1,	/* IUCV path established and usable */
	IUCV_SEVERED	= 2,	/* IUCV path severed (peer gone) */
};
57
/* State of the HVC terminal (tty) associated with a device */
enum tty_state_t {
	TTY_CLOSED	= 0,	/* terminal is closed */
	TTY_OPENED	= 1,	/* terminal is opened */
};
62
/* Per-terminal state of one hvc_iucv device */
struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (EBCDIC) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* protects this structure */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* send buffer size */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)	/* delay before flushing sndbuf */
	struct delayed_work	sndbuf_work;	/* work: send IUCV msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
	u8			info_path[16];	/* IUCV path info (dev attr) */
};
82
/* Buffer element that tracks one incoming or outgoing IUCV message */
struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an incoming IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};
89
90
/* IUCV callback function prototypes (registered via hvc_iucv_handler) */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
95
96
97
/* Kernel module parameter: number of hvc devices (hvc_iucv= kernel parameter) */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines; index 0 is the console device */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)

/* List of z/VM user ID filter entries (hvc_iucv_allow= parameter);
 * hvc_iucv_filter holds hvc_iucv_filter_size entries of 8 bytes each,
 * protected by hvc_iucv_filter_lock. */
#define MAX_VMID_FILTER		(500)
#define FILTER_WILDCARD_CHAR	'*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for struct iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;
114
115
/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending	  = hvc_iucv_path_pending,
	.path_severed	  = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending  = hvc_iucv_msg_pending,
};
122
123
124
125
126
127
128
129
130
131static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
132{
133 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
134 return NULL;
135 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
136}
137
138
139
140
141
142
143
144
145
146
147
148
149
150
/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and,
 * optionally, allocates an internal data buffer with the specified size
 * @size.  The internal data buffer is always allocated with GFP_DMA which
 * is required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and
 * the members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		/* total wire length = payload size + message header */
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type    = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}
173
174
175
176
177
178static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
179{
180 kfree(bufp->mbuf);
181 mempool_free(bufp, hvc_iucv_mempool);
182}
183
184
185
186
187
/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 *
 * The caller is responsible for locking; the list is emptied on return.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The caller must ensure that priv->lock is held.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hangup (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return 0 if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message data not yet received */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* copy as much payload as fits; keep buffer queued if the
		 * message does not fit into the HVC buffer completely */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* NOTE(review): __hvc_resize() presumably requires the hvc
		 * console lock held by the HVC poll path — confirm caller */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
316{
317 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
318 int written;
319 int has_more_data;
320
321 if (count <= 0)
322 return 0;
323
324 if (!priv)
325 return -ENODEV;
326
327 spin_lock(&priv->lock);
328 has_more_data = 0;
329 written = hvc_iucv_write(priv, buf, count, &has_more_data);
330 spin_unlock(&priv->lock);
331
332
333 if (has_more_data)
334 hvc_kick();
335
336 return written;
337}
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending.  To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that has been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 *
 * The caller must ensure that priv->lock is held.
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}
378
379
380
381
382
383
384
385
386
387
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communicaton path has been severed.
 *
 * The caller must ensure that priv->lock is held.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	/* the send buffer is always drained, even if sending failed */
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
427
428
429
430
431
432
433
434
/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
461{
462 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
463 int queued;
464
465 if (count <= 0)
466 return 0;
467
468 if (!priv)
469 return -ENODEV;
470
471 spin_lock(&priv->lock);
472 queued = hvc_iucv_queue(priv, buf, count);
473 spin_unlock(&priv->lock);
474
475 return queued;
476}
477
478
479
480
481
482
483
484
485
486
487
488
489static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
490{
491 struct hvc_iucv_private *priv;
492
493 priv = hvc_iucv_get_private(id);
494 if (!priv)
495 return 0;
496
497 spin_lock_bh(&priv->lock);
498 priv->tty_state = TTY_OPENED;
499 spin_unlock_bh(&priv->lock);
500
501 return 0;
502}
503
504
505
506
507
/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * Drops all queued messages and resets the tty and IUCV state to closed /
 * disconnected.  The caller must ensure that priv->lock is held.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}
518
519
520
521
522
523static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
524{
525 int rc;
526
527 spin_lock_bh(&priv->lock);
528 rc = list_empty(&priv->tty_outqueue);
529 spin_unlock_bh(&priv->lock);
530
531 return rc;
532}
533
534
535
536
537
538
539
540
/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion
 * (with a timeout of HZ/10).
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special console case: If a hang-up is scheduled for a console device,
 * a re-connect cannot be deferred to get_chars(); clean up immediately
 * and keep the tty state opened so the console stays usable.
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;	/* sever path outside of the lock */
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of an struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications.)
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}
659
660
661
662
663
664
665
666
667
668
/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp:		Pointer the HVC device (struct hvc_struct)
 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 *
 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS indicates to
 * drop the IUCV connection (similar to hang up the modem).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any times.
	 */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* Lowering the DTR/RTS lines disconnects an established IUCV
	 * connection.
	 */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* Sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
702
703
704
705
706
707
708
709
710
711
712
713
714
715
/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of an struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed.  The function cleans up tty resources.  The clean-up of the IUCV
 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 * control setting.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);
	priv->tty_state = TTY_CLOSED;
	priv->sndbuf_len = 0;
	spin_unlock_bh(&priv->lock);
}
733
734
735
736
737
738
739
740
/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 * connect, otherwise non-zero.
 *
 * The caller must hold hvc_iucv_filter_lock (read).
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	const char *wildcard, *filter_entry;
	size_t i, len;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++) {
		filter_entry = hvc_iucv_filter + (8 * i);

		/* If a filter entry contains the filter wildcard character,
		 * reduce the length to match the leading portion of ipvmid
		 * only (wildcard match).  Characters following the wildcard
		 * are ignored.
		 */
		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
		len = (wildcard) ? wildcard - filter_entry : 8;
		if (0 == memcmp(ipvmid, filter_entry, len))
			return 0;
	}
	return 1;
}
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by the this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	/* NOTE(review): this literal looks whitespace-mangled; the wildcard
	 * terminal id is presumably "lnxhvc" padded with blanks to 8 chars
	 * — confirm against the original source */
	u8 wildcard[9] = "lnxhvc ";
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* First, check if the pending path request is managed by this
	 * IUCV handler:
	 * - find a disconnected device if ipuser contains the wildcard
	 * - find the device that matches the terminal ID in ipuser
	 */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information for the "peer" sysfs attribute */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
872
873
874
875
876
877
878
879
880
881
882
883
/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function also severs the path (as required by the IUCV protocol) and
 * sets the iucv state to IUCV_SEVERED for the associated struct
 * hvc_iucv_private instance.  Later, the IUCV_SEVERED state triggers a tty
 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}
890
891
892
893
894
895
896
897
898
899
900
901
/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}
937
938
939
940
941
942
943
944
945
946
947
948
949
/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue.  Additional delivery information can be found
 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and purged messages
 * (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	/* destroy outside of the lock */
	destroy_tty_buffer_list(&list_remove);
}
967
968
969
970
971
972
973
974
/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUVC HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}
985
986
987
988
989
990
991
992
/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUVC HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}
998
/* sysfs "termid" attribute: show the IUCV service name (converted to ASCII) */
static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	size_t len;

	len = sizeof(priv->srv_name);
	memcpy(buf, priv->srv_name, len);
	EBCASC(buf, len);		/* srv_name is stored in EBCDIC */
	buf[len++] = '\n';
	return len;
}
1012
/* sysfs "state" attribute: show "<iucv_state>:<tty_state>" as numbers */
static ssize_t hvc_iucv_dev_state_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}
1020
/* sysfs "peer" attribute: show "<vmid>:<service>" of the connected peer
 * (empty fields if not connected) */
static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	char vmid[9], ipuser[9];

	memset(vmid, 0, sizeof(vmid));
	memset(ipuser, 0, sizeof(ipuser));

	spin_lock_bh(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		memcpy(vmid, priv->info_path, 8);
		memcpy(ipuser, priv->info_path + 8, 8);
	}
	spin_unlock_bh(&priv->lock);
	EBCASC(ipuser, 8);	/* service name is stored in EBCDIC */

	return sprintf(buf, "%s:%s\n", vmid, ipuser);
}
1041
1042
1043
/* HVC operations for IUCV devices */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};
1052
1053
/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};
1059
1060
/* IUCV HVC device driver (on the iucv bus) */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};
1066
1067
/* IUCV HVC device attributes (read-only for root/group) */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);	/* IUCV service names are EBCDIC */

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		/* put_device drops the embedded refcount; the release
		 * callback (kfree) then frees priv->dev */
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}
1166
1167
1168
1169
/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * Counterpart of hvc_iucv_alloc(); only used on the init error path.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1177
1178
1179
1180
1181
1182
/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 *		(8 bytes, blank padded, upper case)
 *
 * Returns a pointer to the remaining list on success, or an ERR_PTR
 * (-EINVAL) if the entry is empty, a lone wildcard, or longer than
 * eight characters.
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if the filter was specified in sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	/* prohibit filter entries containing the wildcard character only */
	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
		return ERR_PTR(-EINVAL);

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string to obtain a comma-separated list of
 * z/VM user IDs and stores the parsed entries in a new filter array that
 * atomically replaces the previous one (under hvc_iucv_filter_lock).
 * An empty string (or a single "\n") clears the filter.
 *
 * Returns 0 on success; -EINVAL if an entry is invalid, -ENOMEM if
 * memory allocation failed, or -ENOSPC if more than MAX_VMID_FILTER
 * entries were specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL); /* 8 chars per user ID */
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);	/* free the replaced filter outside the lock */

	err = 0;
out_err:
	return err;
}
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter set by this function
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * If the slab allocator is not yet available (early boot), the filter string
 * is only saved and the filter is set up later by hvc_iucv_init().
 *
 * Returns 0 on success, or an error code from hvc_iucv_setup_filter();
 * -ENODEV if not running under z/VM or no devices are configured,
 * -EINVAL if @val is NULL.
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer to hvc_iucv_init() */
	return rc;
}
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption: at least 4096 bytes)
 * @kp:		Kernel parameter accessed through this function
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer.  Trailing blanks of each entry are stripped.
 * Returns the number of bytes stored in @buffer, or -ENODEV.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);	/* strip blank padding */
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and return len */
	return rc;
}
1345
/* Type check stub for the custom "vmidfilter" kernel parameter type */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/* Set/get operations for the hvc_iucv_allow= core parameter */
static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
1352
1353
1354
1355
/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 *
 * Registers the driver on the iucv bus, sets up a deferred z/VM user ID
 * filter (if one was given on the command line), creates the message buffer
 * cache/mempool, registers the console and the configured terminal devices,
 * and finally registers the IUCV callbacks.
 * Returns 0 on success; otherwise a negative error code (and disables the
 * driver by setting hvc_iucv_devices to 0).
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			   "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
			"kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* register IUCV HVC device driver */
	rc = driver_register(&hvc_iucv_driver);
	if (rc)
		goto out_error;

	/* parse hvc_iucv_allow string and create z/VM user ID filter list
	 * (deferred from param_set_vmidfilter() before slab was available) */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
				"reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
				"z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
				"z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	/* allocate memory for storing IUCV messages */
	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
				"failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
			rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not export any control files */
	return rc;
}
1465
1466
1467
1468
1469
/**
 * hvc_iucv_config() - Parsing of hvc_iucv=  kernel command line parameter
 * @val:	Parameter value (numeric string)
 *
 * NOTE(review): __setup() handlers conventionally return 1 when the argument
 * was consumed; kstrtoul() returns 0/-errno, so an unrecognized value is
 * passed on to init — confirm whether this is intended.
 */
static	int __init hvc_iucv_config(char *val)
{
	return kstrtoul(val, 10, &hvc_iucv_devices);
}
1474
1475
/* Driver entry point, kernel command line and module parameter hooks */
device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
1479