1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#define KMSG_COMPONENT "netiucv"
25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26
27#undef DEBUG
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/types.h>
35#include <linux/interrupt.h>
36#include <linux/timer.h>
37#include <linux/bitops.h>
38
39#include <linux/signal.h>
40#include <linux/string.h>
41#include <linux/device.h>
42
43#include <linux/ip.h>
44#include <linux/if_arp.h>
45#include <linux/tcp.h>
46#include <linux/skbuff.h>
47#include <linux/ctype.h>
48#include <net/dst.h>
49
50#include <asm/io.h>
51#include <linux/uaccess.h>
52#include <asm/ebcdic.h>
53
54#include <net/iucv/iucv.h>
55#include "fsm.h"
56
57MODULE_AUTHOR
58 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
59MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
60
61
62
63
/*
 * s390 debug feature (dbf) configuration: one log each for setup events,
 * data-path events and call tracing.  For every log: name, maximum record
 * length, number of pages, number of areas and the default level.
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

/* Log a constant string into the named dbf log at the given level. */
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

/* Log a raw hex dump of len bytes starting at addr. */
#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

/* Per-CPU scratch buffer used by IUCV_DBF_TEXT_ to format messages. */
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/*
 * printf-style logging; the level check up front avoids the sprintf cost
 * when the record would be discarded anyway.
 */
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (debug_level_enabled(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
103
/*
 * sprintf-style logging into the trace log.  The previous version called
 * debug_sprintf_event() twice with the same arguments, writing every
 * record to the log twice; a single call (using ##text so an empty
 * argument list is accepted) is sufficient.
 */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
	} while (0)
109
110
111
112
113#define PRINTK_HEADER " iucv: "
114
/* Driver object registered on the IUCV bus; parent for the per-connection
 * devices created by this module. */
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus = &iucv_bus,
};
120
121
122
123
/* Per-connection transmit statistics and bookkeeping. */
struct connection_profile {
	unsigned long maxmulti;		/* largest collect queue length drained */
	unsigned long maxcqueue;	/* max skbs coalesced into one send */
	unsigned long doios_single;	/* number of single-skb sends */
	unsigned long doios_multi;	/* number of coalesced (multi) sends */
	unsigned long txlen;		/* total bytes handed to IUCV */
	unsigned long tx_time;		/* accumulated tx time */
	unsigned long send_stamp;	/* jiffies timestamp of last send */
	unsigned long tx_pending;	/* sends outstanding (not yet confirmed) */
	unsigned long tx_max_pending;	/* high-water mark of tx_pending */
};
135
136
137
138
/* One IUCV peer-to-peer connection backing a net_device. */
struct iucv_connection {
	struct list_head list;		/* link in iucv_connection_list */
	struct iucv_path *path;		/* underlying IUCV path, NULL if down */
	struct sk_buff *rx_buff;	/* staging buffer for received messages */
	struct sk_buff *tx_buff;	/* staging buffer for coalesced sends */
	struct sk_buff_head collect_queue; /* skbs waiting while a tx is busy */
	struct sk_buff_head commit_queue;  /* skbs sent, awaiting confirmation */
	spinlock_t collect_lock;	/* protects collect_queue/collect_len */
	int collect_len;		/* bytes currently queued for coalescing */
	int max_buffsize;		/* maximum IUCV message/buffer size */
	fsm_timer timer;		/* connection-setup timeout timer */
	fsm_instance *fsm;		/* connection state machine */
	struct net_device *netdev;	/* owning network device */
	struct connection_profile prof;	/* transmit statistics */
	char userid[9];			/* peer z/VM user id (EBCDIC, NUL-term) */
	char userdata[17];		/* IUCV user data (EBCDIC, NUL-term) */
};
156
157
158
159
/* All existing connections, protected by iucv_connection_rwlock. */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/*
 * Events passed into the connection FSM: the connection the event is for
 * plus event-specific payload (an iucv_message or iucv_path).
 */
struct iucv_event {
	struct iucv_connection *conn;
	void *data;
};
171
172
173
174
/* Private per-net_device data (returned by netdev_priv()). */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long tbusy;		/* bit 0: transmit-busy flag */
	fsm_instance *fsm;		/* device state machine */
	struct iucv_connection *conn;	/* the connection behind this device */
	struct device *dev;		/* sysfs device */
};
182
183
184
185
/* Link-level header prepended to each packet inside an IUCV message:
 * offset of the next packet within the buffer (0 terminates the chain). */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC 5000
197
198
199
200
201
/* Clear the transmit-busy flag and restart the device's tx queue. */
static void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}
208
/* Stop the tx queue and atomically set the busy flag.
 * Returns non-zero if the device was already marked busy. */
static int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}
215
/* Default IUCV user data ("0" followed by blanks, twice) in ASCII and
 * EBCDIC form; used when no explicit user data was configured. */
static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
225
226
227
228
229
230
231
232
233
/*
 * Copy at most @len characters of @name into a static buffer and cut the
 * result at the first blank (or at @len), yielding a printable string.
 * Returns a pointer to the static buffer, so the result is only valid
 * until the next call and the function is not reentrant.
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	int i;

	memcpy(tmp, name, len);
	tmp[len] = '\0';
	for (i = 0; i < len && tmp[i] != '\0' && !isspace(tmp[i]); i++)
		;
	tmp[i] = '\0';
	return tmp;
}
245
246static char *netiucv_printuser(struct iucv_connection *conn)
247{
248 static char tmp_uid[9];
249 static char tmp_udat[17];
250 static char buf[100];
251
252 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
253 tmp_uid[8] = '\0';
254 tmp_udat[16] = '\0';
255 memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
256 memcpy(tmp_udat, conn->userdata, 16);
257 EBCASC(tmp_udat, 16);
258 memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
259 sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
260 return buf;
261 } else
262 return netiucv_printname(conn->userid, 8);
263}
264
265
266
267
/* States of the per-device state machine. */
enum dev_states {
	DEV_STATE_STOPPED,	/* device down, connection down */
	DEV_STATE_STARTWAIT,	/* device up, waiting for the connection */
	DEV_STATE_STOPWAIT,	/* shutting down, waiting for conn teardown */
	DEV_STATE_RUNNING,	/* device and connection operational */

	/* MUST be always the last element!! */
	NR_DEV_STATES
};

/* Printable names, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};
285
286
287
288
/* Events fed into the per-device state machine. */
enum dev_events {
	DEV_EVENT_START,	/* interface brought up */
	DEV_EVENT_STOP,		/* interface taken down */
	DEV_EVENT_CONUP,	/* underlying connection established */
	DEV_EVENT_CONDOWN,	/* underlying connection lost/stopped */

	/* MUST be always the last element!! */
	NR_DEV_EVENTS
};

/* Printable names, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};
306
307
308
309
/* Events fed into the connection state machine. */
enum conn_events {
	/*
	 * Events arriving from the IUCV interrupt handlers
	 * (first event carries a struct iucv_event as argument).
	 */
	CONN_EVENT_CONN_REQ,	/* peer requests a connection */
	CONN_EVENT_CONN_ACK,	/* peer accepted our connect request */
	CONN_EVENT_CONN_REJ,	/* peer severed the path */
	CONN_EVENT_CONN_SUS,	/* path quiesced by peer */
	CONN_EVENT_CONN_RES,	/* path resumed by peer */
	CONN_EVENT_RX,		/* message pending (receive) */
	CONN_EVENT_TXDONE,	/* message send completed */

	/*
	 * Events generated locally.
	 */

	/* Connection-setup timeout expired. */
	CONN_EVENT_TIMER,

	/* Driver-initiated start/stop of the connection. */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/* MUST be always the last element!! */
	NR_CONN_EVENTS,
};

/* Printable names, indexed by enum conn_events. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};
359
360
361
362
/* States of the connection state machine. */
enum conn_states {
	/*
	 * Connection not assigned to any device;
	 * initial state, invalid for operation.
	 */
	CONN_STATE_INVALID,

	/* Standby: assigned to a device but not started. */
	CONN_STATE_STOPPED,

	/*
	 * Connection registered, no connect request sent yet
	 * (or connect request rejected); waiting for the peer.
	 */
	CONN_STATE_STARTWAIT,

	/*
	 * Connect request sent, waiting for the peer's
	 * acknowledgment (guarded by the setup timer).
	 */
	CONN_STATE_SETUPWAIT,

	/* Connection up and idle: ready to transmit. */
	CONN_STATE_IDLE,

	/* A transmit is in progress. */
	CONN_STATE_TX,

	/* Registration with the IUCV layer failed. */
	CONN_STATE_REGERR,

	/* Connecting to the peer failed. */
	CONN_STATE_CONNERR,

	/* MUST be always the last element!! */
	NR_CONN_STATES,
};
413
/*
 * Printable names, indexed by enum conn_states.  The table previously
 * contained a stray "Terminating" entry with no matching state, which
 * shifted the labels so that CONN_STATE_REGERR printed as "Terminating"
 * and CONN_STATE_CONNERR as "Registration error".  The table now has
 * exactly NR_CONN_STATES entries, one per state.
 */
static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
425
426
427
428
429
/* Handles of the three s390 debug feature logs. */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

/* Per-CPU scratch buffer backing the IUCV_DBF_TEXT_ macro. */
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Tear down all three debug logs (debug_unregister tolerates NULL). */
static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
/*
 * Create the setup, data and trace debug logs, attach a hex/ascii view to
 * each and set the default levels.
 * Returns 0 on success or -ENOMEM if any registration failed (in which
 * case all successfully registered logs are unregistered again).
 */
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
473
474
475
476
477
478static void netiucv_callback_rx(struct iucv_path *path,
479 struct iucv_message *msg)
480{
481 struct iucv_connection *conn = path->private;
482 struct iucv_event ev;
483
484 ev.conn = conn;
485 ev.data = msg;
486 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
487}
488
489static void netiucv_callback_txdone(struct iucv_path *path,
490 struct iucv_message *msg)
491{
492 struct iucv_connection *conn = path->private;
493 struct iucv_event ev;
494
495 ev.conn = conn;
496 ev.data = msg;
497 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
498}
499
/* IUCV path_complete callback: peer accepted our connect request. */
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
506
/*
 * IUCV path_pending callback: a peer wants to connect to us.
 * Scans the connection list for an entry matching the peer's user id and
 * user data; on a match, stores the path and feeds CONN_EVENT_CONN_REQ
 * into that connection's FSM.
 * Returns 0 if a matching connection was found, -EINVAL otherwise (the
 * IUCV core then refuses the path).
 * NOTE(review): tmp_user/tmp_udat are static and only used for the debug
 * message; concurrent callbacks could garble that message but not the
 * matching logic.
 */
static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
				    u8 *ipuser)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection: hand it the pending path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
537
/* IUCV path_severed callback: peer severed the path. */
static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}
544
/* IUCV path_quiesced callback: peer suspended the path. */
static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}
551
/* IUCV path_resumed callback: peer resumed a suspended path. */
static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
558
559
560
561
/* Dummy FSM action: deliberately ignore the event. */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
565
566
567
568
569
570
571
572
573
574
575
576
577
/*
 * netiucv_unpack_skb() - split a received IUCV buffer into packets.
 * @conn: the connection the buffer arrived on
 * @pskb: staging buffer holding one or more ll_header-framed packets
 *
 * Walks the chain of ll_headers inside @pskb (a header's "next" field is
 * the offset of the following packet; 0 terminates), copies each packet
 * into a freshly allocated skb and passes it up the stack.  Updates the
 * device rx statistics; drops (and stops unpacking) on a bad length or
 * allocation failure.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = cpu_to_be16(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert the absolute offset into this packet's length. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				       header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): ip_summed is set on the staging buffer, not
		 * on the skb passed upstairs — verify this is intended. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next packet in the staging buffer. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
632
/*
 * conn_action_rx() - CONN_EVENT_RX handler: receive a pending message.
 * @fi:    connection FSM instance
 * @event: the event id
 * @arg:   struct iucv_event carrying the connection and the iucv_message
 *
 * Rejects the message if the connection has no netdev or the message is
 * larger than the receive buffer; otherwise receives it into rx_buff and
 * unpacks the contained packets.
 */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	/* netdev_priv() is pure pointer arithmetic, so computing privptr
	 * before the NULL check below does not dereference anything. */
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Reset the staging buffer before receiving into it. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* A message shorter than 5 bytes cannot hold a header plus data. */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
668
/*
 * conn_action_txdone() - CONN_EVENT_TXDONE handler: a send completed.
 * @fi:    connection FSM instance
 * @event: the event id
 * @arg:   struct iucv_event carrying the connection and the iucv_message
 *
 * Releases the confirmed skb (for single sends, tagged msg->tag != 0),
 * then coalesces everything waiting on the collect queue into tx_buff and
 * sends it as one IUCV message.  Returns to IDLE when nothing is queued
 * or the send fails; updates the tx statistics either way.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;	/* non-zero: confirms a single send */
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn || !conn->netdev) {
		IUCV_DBF_TEXT(data, 2,
			      "Send confirmation for unlinked connection\n");
		return;
	}
	privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		/* Account and free the skb the confirmation refers to. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			refcount_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Reset tx_buff, then pack all queued skbs into it. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		/* Nothing was queued: the connection is idle again. */
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminating header (next == 0), then send the whole buffer. */
	header.next = 0;
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = jiffies;
	txmsg.class = 0;
	txmsg.tag = 0;	/* tag 0 marks a coalesced (multi) send */
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
758
/* IUCV callback table shared by all netiucv paths. */
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone,
};
768
/*
 * conn_action_connaccept() - accept a pending connect request from a peer.
 * @fi:    connection FSM instance
 * @event: the event id (CONN_EVENT_CONN_REQ)
 * @arg:   struct iucv_event carrying the connection and the pending path
 *
 * Accepts the path with our handler and user data; on success moves the
 * connection to IDLE, sizes the device tx queue to the path's message
 * limit and signals DEV_EVENT_CONUP to the device FSM.
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
792
793static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
794{
795 struct iucv_event *ev = arg;
796 struct iucv_path *path = ev->data;
797
798 IUCV_DBF_TEXT(trace, 3, __func__);
799 iucv_path_sever(path, NULL);
800}
801
/*
 * conn_action_connack() - our outgoing connect request was accepted.
 * Stops the setup timer, moves to IDLE, adopts the negotiated message
 * limit as tx queue length and signals DEV_EVENT_CONUP.
 */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
814
/*
 * conn_action_conntimsev() - connection setup timed out.
 * Severs the half-open path and falls back to STARTWAIT so a later
 * peer-initiated request can still be accepted.
 */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
824
/*
 * conn_action_connsever() - the peer dropped the connection.
 * Severs our side of the path, informs the operator, returns to
 * STARTWAIT and signals DEV_EVENT_CONDOWN to the device FSM.
 */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
			       "connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
842
843static void conn_action_start(fsm_instance *fi, int event, void *arg)
844{
845 struct iucv_connection *conn = arg;
846 struct net_device *netdev = conn->netdev;
847 struct netiucv_priv *privptr = netdev_priv(netdev);
848 int rc;
849
850 IUCV_DBF_TEXT(trace, 3, __func__);
851
852 fsm_newstate(fi, CONN_STATE_STARTWAIT);
853
854
855
856
857
858
859
860 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
861 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
862 IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
863 netdev->name, netiucv_printuser(conn));
864
865 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
866 NULL, conn->userdata, conn);
867 switch (rc) {
868 case 0:
869 netdev->tx_queue_len = conn->path->msglim;
870 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
871 CONN_EVENT_TIMER, conn);
872 return;
873 case 11:
874 dev_warn(privptr->dev,
875 "The IUCV device failed to connect to z/VM guest %s\n",
876 netiucv_printname(conn->userid, 8));
877 fsm_newstate(fi, CONN_STATE_STARTWAIT);
878 break;
879 case 12:
880 dev_warn(privptr->dev,
881 "The IUCV device failed to connect to the peer on z/VM"
882 " guest %s\n", netiucv_printname(conn->userid, 8));
883 fsm_newstate(fi, CONN_STATE_STARTWAIT);
884 break;
885 case 13:
886 dev_err(privptr->dev,
887 "Connecting the IUCV device would exceed the maximum"
888 " number of IUCV connections\n");
889 fsm_newstate(fi, CONN_STATE_CONNERR);
890 break;
891 case 14:
892 dev_err(privptr->dev,
893 "z/VM guest %s has too many IUCV connections"
894 " to connect with the IUCV device\n",
895 netiucv_printname(conn->userid, 8));
896 fsm_newstate(fi, CONN_STATE_CONNERR);
897 break;
898 case 15:
899 dev_err(privptr->dev,
900 "The IUCV device cannot connect to a z/VM guest with no"
901 " IUCV authorization\n");
902 fsm_newstate(fi, CONN_STATE_CONNERR);
903 break;
904 default:
905 dev_err(privptr->dev,
906 "Connecting the IUCV device failed with error %d\n",
907 rc);
908 fsm_newstate(fi, CONN_STATE_CONNERR);
909 break;
910 }
911 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
912 kfree(conn->path);
913 conn->path = NULL;
914}
915
916static void netiucv_purge_skb_queue(struct sk_buff_head *q)
917{
918 struct sk_buff *skb;
919
920 while ((skb = skb_dequeue(q))) {
921 refcount_dec(&skb->users);
922 dev_kfree_skb_any(skb);
923 }
924}
925
/*
 * conn_action_stop() - CONN_EVENT_STOP handler: tear the connection down.
 * Stops the setup timer, purges both skb queues, severs and frees the
 * path if one exists, and signals DEV_EVENT_CONDOWN to the device FSM.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
947
/* Handler for events arriving in CONN_STATE_INVALID: log and ignore. */
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}
956
/* Transition table of the connection FSM: {state, event, action}. */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	/* Incoming requests are accepted only while (re)connecting. */
	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};
988
989static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
/*
 * dev_action_start() - DEV_EVENT_START handler (interface brought up).
 * Moves the device to STARTWAIT and starts the underlying connection.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
1014
1015
1016
1017
1018
1019
1020
1021
/*
 * dev_action_stop() - DEV_EVENT_STOP handler (interface taken down).
 * Moves the device to STOPWAIT and asks the connection FSM to stop.
 * Note: only ev.conn is set; conn_action_stop() does not read ev.data.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
1036
1037
1038
1039
1040
1041
1042
1043
1044
/*
 * dev_action_connup() - DEV_EVENT_CONUP handler (connection came up).
 * From STARTWAIT this completes startup and enters RUNNING; a CONUP in
 * STOPWAIT is only logged (the device is already being shut down).
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
		case DEV_STATE_STARTWAIT:
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(privptr->dev,
				"The IUCV device has been connected"
				" successfully to %s\n",
				netiucv_printuser(privptr->conn));
			IUCV_DBF_TEXT(setup, 3,
				"connection is up and running\n");
			break;
		case DEV_STATE_STOPWAIT:
			IUCV_DBF_TEXT(data, 2,
				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
			break;
	}
}
1069
1070
1071
1072
1073
1074
1075
1076
1077
/*
 * dev_action_conndown() - DEV_EVENT_CONDOWN handler (connection lost).
 * While RUNNING, fall back to STARTWAIT (the connection will retry);
 * while STOPWAIT, the teardown is complete and the device is STOPPED.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
		case DEV_STATE_RUNNING:
			fsm_newstate(fi, DEV_STATE_STARTWAIT);
			break;
		case DEV_STATE_STOPWAIT:
			fsm_newstate(fi, DEV_STATE_STOPPED);
			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
			break;
	}
}
1093
/* Transition table of the device FSM: {state, event, action}. */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};
1107
1108static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
/*
 * netiucv_transmit_skb() - hand one skb to IUCV for transmission.
 * @conn: the connection to send on
 * @skb:  the packet to send
 *
 * If a transmit is already in flight the skb is queued on the collect
 * queue (bounded by max_buffsize) and will be coalesced into the next
 * send by conn_action_txdone().  Otherwise the skb is framed with two
 * ll_headers and sent immediately as a single IUCV message (tag 1).
 * Returns 0 on success, -EBUSY if the collect queue is full, -ENOMEM if
 * a bounce buffer could not be allocated, or the iucv_message_send()
 * error code.
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		/* A send is in flight: queue for later coalescing. */
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			refcount_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb into a fresh GFP_DMA buffer when there is no
		 * tailroom for the trailing header, or when the buffer end
		 * has bit 31 set — presumably data for IUCV must lie in
		 * 31-bit addressable storage (TODO confirm).
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				skb_put_data(nskb, skb->data, skb->len);
			}
			copied = 1;
		}
		/*
		 * Frame the packet: leading header pointing past the data,
		 * trailing header with next == 0 terminating the chain.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		skb_put_data(nskb, &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = jiffies;

		msg.tag = 1;	/* tag 1 marks a single-skb send */
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * The original skb was framed in place:
				 * strip both headers again so the caller
				 * may retry the unmodified packet.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (copied)
				dev_kfree_skb(skb);
			refcount_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/*
 * netiucv_open() - ndo_open: start the device by firing DEV_EVENT_START.
 * Always returns 0; connection setup proceeds asynchronously.
 */
static int netiucv_open(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
/*
 * netiucv_close() - ndo_stop: shut the device down via DEV_EVENT_STOP.
 * Always returns 0; teardown proceeds through the state machines.
 */
static int netiucv_close(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
/*
 * netiucv_tx() - ndo_start_xmit: queue a packet for transmission.
 * @skb: the packet (must have NETIUCV_HDRLEN of headroom)
 * @dev: the transmitting device
 *
 * Drops the packet (returning NETDEV_TX_OK) when it is NULL, lacks
 * headroom, or the device is not RUNNING; returns NETDEV_TX_BUSY when
 * the device is already marked busy or the transmit itself fails.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);
	/*
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		IUCV_DBF_TEXT(data, 2,
			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return NETDEV_TX_BUSY;
	}
	netif_trans_update(dev);
	rc = netiucv_transmit_skb(privptr->conn, skb);
	netiucv_clear_busy(dev);
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
1306
1307
1308
1309
1310
1311
1312
1313
1314
/*
 * netiucv_stats() - ndo_get_stats: return the interface statistics kept
 * in the device's private data.
 */
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}
1322
1323
1324
1325
1326
/* sysfs "user" attribute: show the peer's userid[.userdata]. */
static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
1335
/*
 * netiucv_check_user() - parse a "userid[.userdata]" sysfs input string.
 * @buf:      raw input (may end in '\n')
 * @count:    input length
 * @username: out: 8 chars, uppercased, blank-padded, NUL-terminated
 * @userdata: out: 16 chars in EBCDIC, blank-padded, NUL-terminated
 *
 * The userid accepts alphanumerics and '$' only; without a ".userdata"
 * part the default iucvMagic user data is used.  Returns 0 on success
 * or -EINVAL for overlong input or invalid characters.
 */
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	const char *p;
	int i;

	p = strchr(buf, '.');
	/* Bound checks: <=8 chars userid, <=16 chars userdata (+'.', '\n'). */
	if ((p && ((count > 26) ||
		   ((p - buf) > 8) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	/* Blank-pad the userid to its full 8 characters. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (*p == '.') {
		p++;
		for (i = 0; i < 16 && *p; i++, p++) {
			if (*p == '\n')
				break;
			userdata[i] = toupper(*p);
		}
		/* NOTE(review): with an empty userdata part (i == 0) this
		 * loop pads nothing and userdata stays uninitialized —
		 * verify whether "user." should fall back to the magic. */
		while (i > 0 && i < 16)
			userdata[i++] = ' ';
	} else
		memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	ASCEBC(userdata, 16);

	return 0;
}
1383
/**
 * user_write - sysfs 'user' attribute (write): set a new peer
 * "username[.userdata]" for this connection.
 *
 * Fails with -EPERM if the peer would change while the interface is
 * up/running, and with -EEXIST if another netiucv device already has a
 * connection to the same peer.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char username[9];
	char userdata[17];
	int rc;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	/* The peer may only be changed while the device is down. */
	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	/* Refuse a peer another netiucv device is already connected to. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
1422
1423static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1424 char *buf)
1425{
1426 struct netiucv_priv *priv = dev_get_drvdata(dev);
1427
1428 IUCV_DBF_TEXT(trace, 5, __func__);
1429 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1430}
1431
1432static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1433 const char *buf, size_t count)
1434{
1435 struct netiucv_priv *priv = dev_get_drvdata(dev);
1436 struct net_device *ndev = priv->conn->netdev;
1437 unsigned int bs1;
1438 int rc;
1439
1440 IUCV_DBF_TEXT(trace, 3, __func__);
1441 if (count >= 39)
1442 return -EINVAL;
1443
1444 rc = kstrtouint(buf, 0, &bs1);
1445
1446 if (rc == -EINVAL) {
1447 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1448 buf);
1449 return -EINVAL;
1450 }
1451 if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1452 IUCV_DBF_TEXT_(setup, 2,
1453 "buffer_write: buffer size %d too large\n",
1454 bs1);
1455 return -EINVAL;
1456 }
1457 if ((ndev->flags & IFF_RUNNING) &&
1458 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1459 IUCV_DBF_TEXT_(setup, 2,
1460 "buffer_write: buffer size %d too small\n",
1461 bs1);
1462 return -EINVAL;
1463 }
1464 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1465 IUCV_DBF_TEXT_(setup, 2,
1466 "buffer_write: buffer size %d too small\n",
1467 bs1);
1468 return -EINVAL;
1469 }
1470
1471 priv->conn->max_buffsize = bs1;
1472 if (!(ndev->flags & IFF_RUNNING))
1473 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1474
1475 return count;
1476
1477}
1478
1479static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1480
1481static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1482 char *buf)
1483{
1484 struct netiucv_priv *priv = dev_get_drvdata(dev);
1485
1486 IUCV_DBF_TEXT(trace, 5, __func__);
1487 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1488}
1489
1490static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1491
1492static ssize_t conn_fsm_show (struct device *dev,
1493 struct device_attribute *attr, char *buf)
1494{
1495 struct netiucv_priv *priv = dev_get_drvdata(dev);
1496
1497 IUCV_DBF_TEXT(trace, 5, __func__);
1498 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1499}
1500
1501static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1502
1503static ssize_t maxmulti_show (struct device *dev,
1504 struct device_attribute *attr, char *buf)
1505{
1506 struct netiucv_priv *priv = dev_get_drvdata(dev);
1507
1508 IUCV_DBF_TEXT(trace, 5, __func__);
1509 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1510}
1511
1512static ssize_t maxmulti_write (struct device *dev,
1513 struct device_attribute *attr,
1514 const char *buf, size_t count)
1515{
1516 struct netiucv_priv *priv = dev_get_drvdata(dev);
1517
1518 IUCV_DBF_TEXT(trace, 4, __func__);
1519 priv->conn->prof.maxmulti = 0;
1520 return count;
1521}
1522
1523static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1524
1525static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1526 char *buf)
1527{
1528 struct netiucv_priv *priv = dev_get_drvdata(dev);
1529
1530 IUCV_DBF_TEXT(trace, 5, __func__);
1531 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1532}
1533
1534static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536{
1537 struct netiucv_priv *priv = dev_get_drvdata(dev);
1538
1539 IUCV_DBF_TEXT(trace, 4, __func__);
1540 priv->conn->prof.maxcqueue = 0;
1541 return count;
1542}
1543
1544static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1545
1546static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1547 char *buf)
1548{
1549 struct netiucv_priv *priv = dev_get_drvdata(dev);
1550
1551 IUCV_DBF_TEXT(trace, 5, __func__);
1552 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1553}
1554
1555static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1556 const char *buf, size_t count)
1557{
1558 struct netiucv_priv *priv = dev_get_drvdata(dev);
1559
1560 IUCV_DBF_TEXT(trace, 4, __func__);
1561 priv->conn->prof.doios_single = 0;
1562 return count;
1563}
1564
1565static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1566
1567static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1568 char *buf)
1569{
1570 struct netiucv_priv *priv = dev_get_drvdata(dev);
1571
1572 IUCV_DBF_TEXT(trace, 5, __func__);
1573 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1574}
1575
1576static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1577 const char *buf, size_t count)
1578{
1579 struct netiucv_priv *priv = dev_get_drvdata(dev);
1580
1581 IUCV_DBF_TEXT(trace, 5, __func__);
1582 priv->conn->prof.doios_multi = 0;
1583 return count;
1584}
1585
1586static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1587
1588static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1589 char *buf)
1590{
1591 struct netiucv_priv *priv = dev_get_drvdata(dev);
1592
1593 IUCV_DBF_TEXT(trace, 5, __func__);
1594 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1595}
1596
1597static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1598 const char *buf, size_t count)
1599{
1600 struct netiucv_priv *priv = dev_get_drvdata(dev);
1601
1602 IUCV_DBF_TEXT(trace, 4, __func__);
1603 priv->conn->prof.txlen = 0;
1604 return count;
1605}
1606
1607static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1608
1609static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1610 char *buf)
1611{
1612 struct netiucv_priv *priv = dev_get_drvdata(dev);
1613
1614 IUCV_DBF_TEXT(trace, 5, __func__);
1615 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1616}
1617
1618static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1619 const char *buf, size_t count)
1620{
1621 struct netiucv_priv *priv = dev_get_drvdata(dev);
1622
1623 IUCV_DBF_TEXT(trace, 4, __func__);
1624 priv->conn->prof.tx_time = 0;
1625 return count;
1626}
1627
1628static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1629
1630static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1631 char *buf)
1632{
1633 struct netiucv_priv *priv = dev_get_drvdata(dev);
1634
1635 IUCV_DBF_TEXT(trace, 5, __func__);
1636 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1637}
1638
1639static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1640 const char *buf, size_t count)
1641{
1642 struct netiucv_priv *priv = dev_get_drvdata(dev);
1643
1644 IUCV_DBF_TEXT(trace, 4, __func__);
1645 priv->conn->prof.tx_pending = 0;
1646 return count;
1647}
1648
1649static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1650
1651static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1652 char *buf)
1653{
1654 struct netiucv_priv *priv = dev_get_drvdata(dev);
1655
1656 IUCV_DBF_TEXT(trace, 5, __func__);
1657 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1658}
1659
1660static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1661 const char *buf, size_t count)
1662{
1663 struct netiucv_priv *priv = dev_get_drvdata(dev);
1664
1665 IUCV_DBF_TEXT(trace, 4, __func__);
1666 priv->conn->prof.tx_max_pending = 0;
1667 return count;
1668}
1669
1670static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1671
/* Per-device configuration attributes (buffer size, peer user id). */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

/* Read-mostly statistics, exposed under a "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};

/* Assigned to dev->groups so device_register() creates all files. */
static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
1706
1707static int netiucv_register_device(struct net_device *ndev)
1708{
1709 struct netiucv_priv *priv = netdev_priv(ndev);
1710 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1711 int ret;
1712
1713 IUCV_DBF_TEXT(trace, 3, __func__);
1714
1715 if (dev) {
1716 dev_set_name(dev, "net%s", ndev->name);
1717 dev->bus = &iucv_bus;
1718 dev->parent = iucv_root;
1719 dev->groups = netiucv_attr_groups;
1720
1721
1722
1723
1724
1725
1726
1727 dev->release = (void (*)(struct device *))kfree;
1728 dev->driver = &netiucv_driver;
1729 } else
1730 return -ENOMEM;
1731
1732 ret = device_register(dev);
1733 if (ret) {
1734 put_device(dev);
1735 return ret;
1736 }
1737 priv->dev = dev;
1738 dev_set_drvdata(dev, priv);
1739 return 0;
1740}
1741
/* Tear down the sysfs device created by netiucv_register_device();
 * device_unregister() drops the last reference and frees @dev via
 * its ->release callback. */
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}
1747
1748
1749
1750
1751
/**
 * netiucv_new_connection - allocate and initialize a connection
 * structure for @dev and link it into the global connection list.
 * @dev:      owning network device.
 * @username: optional peer user id (9 bytes incl. NUL); when given,
 *            the connection FSM starts in STOPPED instead of INVALID.
 * @userdata: optional peer user data (17 bytes incl. NUL).
 *
 * Returns the new connection or NULL on allocation failure.
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	/* GFP_DMA: IUCV requires buffers below the DMA limit. */
	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (userdata)
		memcpy(conn->userdata, userdata, 17);
	if (username) {
		/* A known peer makes the connection immediately usable. */
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

/* Error unwinding in reverse order of acquisition. */
out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
1804
1805
1806
1807
1808
/**
 * netiucv_remove_connection - unlink @conn from the global list and
 * release its IUCV path, queued skbs, FSM and I/O buffers.
 *
 * NOTE(review): @conn itself (kzalloc'ed in netiucv_new_connection())
 * is not freed here — presumably released by the caller or the netdev
 * teardown path; verify.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	/* Remove from list first so no new users can find it. */
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		/* Sever the IUCV path before freeing its descriptor. */
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
1828
1829
1830
1831
1832static void netiucv_free_netdevice(struct net_device *dev)
1833{
1834 struct netiucv_priv *privptr = netdev_priv(dev);
1835
1836 IUCV_DBF_TEXT(trace, 3, __func__);
1837
1838 if (!dev)
1839 return;
1840
1841 if (privptr) {
1842 if (privptr->conn)
1843 netiucv_remove_connection(privptr->conn);
1844 if (privptr->fsm)
1845 kfree_fsm(privptr->fsm);
1846 privptr->conn = NULL; privptr->fsm = NULL;
1847
1848 }
1849}
1850
1851
1852
1853
/* net_device callbacks for a netiucv interface. */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open = netiucv_open,
	.ndo_stop = netiucv_close,
	.ndo_get_stats = netiucv_stats,
	.ndo_start_xmit = netiucv_tx,
};
1860
/**
 * netiucv_setup_netdevice - alloc_netdev() setup callback: configure
 * the point-to-point, ARP-less IUCV interface defaults.
 */
static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu = NETIUCV_MTU_DEFAULT;
	dev->min_mtu = 576;	/* matches the 576-byte floor in buffer_write() */
	dev->max_mtu = NETIUCV_MTU_MAX;
	dev->needs_free_netdev = true;	/* core calls free_netdev() */
	dev->priv_destructor = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->addr_len = 0;	/* no link-layer address */
	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops = &netiucv_netdev_ops;
}
1875
1876
1877
1878
/**
 * netiucv_init_netdevice - allocate a netdev with its FSM and
 * connection for the given peer.
 *
 * Locking: on SUCCESS this function returns with the rtnl lock still
 * held — the caller (connection_store()) registers the netdev and then
 * drops it.  On failure the lock is released here.
 *
 * Returns the new net_device or NULL on error.
 */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;	/* rtnl lock intentionally still held, see above */

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	rtnl_unlock();
	free_netdev(dev);
	return NULL;
}
1914
/**
 * connection_store - driver sysfs 'connection' attribute: create a new
 * netiucv interface to the peer named in @buf ("username[.userdata]").
 *
 * Returns @count on success, -EEXIST if a connection to that peer
 * already exists, -ENODEV/-ENOMEM style errors otherwise.
 */
static ssize_t connection_store(struct device_driver *drv, const char *buf,
				size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	/* Reject duplicate peers. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	/* On success the rtnl lock is held from here on (taken inside
	 * netiucv_init_netdevice) until rtnl_unlock() below. */
	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}
static DRIVER_ATTR_WO(connection);
1978
/**
 * remove_store - driver sysfs 'remove' attribute: destroy the netiucv
 * interface whose name is given in @buf.
 *
 * Returns @count on success, -EPERM if the interface is still up,
 * -EINVAL if no interface with that name exists.
 */
static ssize_t remove_store(struct device_driver *drv, const char *buf,
			    size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	/* Clamp so the copy below always fits (incl. terminator). */
	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf or blank terminates the name */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		/* NOTE(review): the lock is dropped before ndev is used
		 * below — presumably safe because removal only happens
		 * via this driver; confirm. */
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				" to %s and cannot be removed\n",
				priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}
static DRIVER_ATTR_WO(remove);
2027
/* Driver-level sysfs attributes: 'connection' creates an interface,
 * 'remove' destroys one. */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

/* Assigned to netiucv_driver.groups in netiucv_init(). */
static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2042
/* Print the one-time load message. */
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
2047
2048static void __exit netiucv_exit(void)
2049{
2050 struct iucv_connection *cp;
2051 struct net_device *ndev;
2052 struct netiucv_priv *priv;
2053 struct device *dev;
2054
2055 IUCV_DBF_TEXT(trace, 3, __func__);
2056 while (!list_empty(&iucv_connection_list)) {
2057 cp = list_entry(iucv_connection_list.next,
2058 struct iucv_connection, list);
2059 ndev = cp->netdev;
2060 priv = netdev_priv(ndev);
2061 dev = priv->dev;
2062
2063 unregister_netdev(ndev);
2064 netiucv_unregister_device(dev);
2065 }
2066
2067 driver_unregister(&netiucv_driver);
2068 iucv_unregister(&netiucv_handler, 1);
2069 iucv_unregister_dbf_views();
2070
2071 pr_info("driver unloaded\n");
2072 return;
2073}
2074
/**
 * netiucv_init - module load: register debug views, the IUCV handler
 * and the device driver, unwinding in reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	/* Attach the sysfs attribute groups before registering. */
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}

	netiucv_banner();
	return rc;

/* Error unwinding in reverse order of registration. */
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}
2103
/* Module entry/exit points and license. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");
2107