1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#undef DEBUG
19#undef DEBUGDATA
20#undef DEBUGCCW
21
22#define KMSG_COMPONENT "ctcm"
23#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/errno.h>
30#include <linux/types.h>
31#include <linux/interrupt.h>
32#include <linux/timer.h>
33#include <linux/bitops.h>
34
35#include <linux/signal.h>
36#include <linux/string.h>
37
38#include <linux/ip.h>
39#include <linux/if_arp.h>
40#include <linux/tcp.h>
41#include <linux/skbuff.h>
42#include <linux/ctype.h>
43#include <net/dst.h>
44
45#include <linux/io.h>
46#include <asm/ccwdev.h>
47#include <asm/ccwgroup.h>
48#include <linux/uaccess.h>
49
50#include <asm/idals.h>
51
52#include "ctcm_fsms.h"
53#include "ctcm_main.h"
54
55
56
57
58
59
/* driver-wide root device; NOTE(review): set up/used outside this chunk */
static struct device *ctcm_root_dev;
61
62
63
64
/*
 * Singly linked list of all detected channels; manipulated by
 * channel_remove()/channel_get() below.  NOTE(review): the locking
 * scheme protecting this list is not visible in this chunk.
 */
struct channel *channels;
66
67
68
69
70
71
72
73
/*
 * ctcm_unpack_skb - split a received transfer buffer into LL frames and
 *	hand each frame up to the network stack.
 * @ch:   channel the data arrived on.
 * @pskb: receive buffer; the first two bytes hold the total block length,
 *	  followed by a sequence of (struct ll_header + payload) frames.
 *
 * On any malformed frame the rest of the buffer is dropped and the rx
 * error counters are bumped.  The per-condition bits in ch->logflags
 * ensure each error class is logged only once until a frame is
 * delivered successfully (which clears logflags).
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	/* leading 2 bytes of the buffer = remaining block length */
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);	/* skip the block length field */
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		/* classic S390 protocol carries IP packets only */
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
				/* log this condition only once */
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet type 0x%04x"
					" - dropping",
					CTCM_FUNTAIL, dev->name, header->type);
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		/* a frame must contain more than just its own header */
		if ((header->length <= LL_HEADER_LENGTH) ||
		    (len <= LL_HEADER_LENGTH)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet size %d(%d,%d)"
					"- dropping",
					CTCM_FUNTAIL, dev->name,
					header->length, dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		/* frame must fit into the buffer and the remaining block */
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Packet size %d (overrun)"
					" - dropping", CTCM_FUNTAIL,
					dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		/* copy the frame into a fresh skb for the stack */
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): MEMORY allocation error",
						CTCM_FUNTAIL, dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): marks the source buffer, not the new skb -
		 * confirm this is intentional */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;

		/* successful delivery re-arms all one-shot log flags */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		if (len > 0) {
			/* advance to the next frame in the block */
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				CTCM_DBF_DEV_NAME(TRACE, dev,
					"Overrun in ctcm_unpack_skb");
				ch->logflags |= LOG_FLAG_OVERRUN;
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
181
182
183
184
185
186
187static void channel_free(struct channel *ch)
188{
189 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
190 ch->flags &= ~CHANNEL_FLAGS_INUSE;
191 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
192}
193
194
195
196
197
198
199static void channel_remove(struct channel *ch)
200{
201 struct channel **c = &channels;
202 char chid[CTCM_ID_SIZE+1];
203 int ok = 0;
204
205 if (ch == NULL)
206 return;
207 else
208 strncpy(chid, ch->id, CTCM_ID_SIZE);
209
210 channel_free(ch);
211 while (*c) {
212 if (*c == ch) {
213 *c = ch->next;
214 fsm_deltimer(&ch->timer);
215 if (IS_MPC(ch))
216 fsm_deltimer(&ch->sweep_timer);
217
218 kfree_fsm(ch->fsm);
219 clear_normalized_cda(&ch->ccw[4]);
220 if (ch->trans_skb != NULL) {
221 clear_normalized_cda(&ch->ccw[1]);
222 dev_kfree_skb_any(ch->trans_skb);
223 }
224 if (IS_MPC(ch)) {
225 tasklet_kill(&ch->ch_tasklet);
226 tasklet_kill(&ch->ch_disc_tasklet);
227 kfree(ch->discontact_th);
228 }
229 kfree(ch->ccw);
230 kfree(ch->irb);
231 kfree(ch);
232 ok = 1;
233 break;
234 }
235 c = &((*c)->next);
236 }
237
238 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
239 chid, ok ? "OK" : "failed");
240}
241
242
243
244
245
246
247
248
249
250
251static struct channel *channel_get(enum ctcm_channel_types type,
252 char *id, int direction)
253{
254 struct channel *ch = channels;
255
256 while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
257 ch = ch->next;
258 if (!ch) {
259 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
260 "%s(%d, %s, %d) not found in channel list\n",
261 CTCM_FUNTAIL, type, id, direction);
262 } else {
263 if (ch->flags & CHANNEL_FLAGS_INUSE)
264 ch = NULL;
265 else {
266 ch->flags |= CHANNEL_FLAGS_INUSE;
267 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
268 ch->flags |= (direction == CTCM_WRITE)
269 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
270 fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
271 }
272 }
273 return ch;
274}
275
276static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
277{
278 if (!IS_ERR(irb))
279 return 0;
280
281 CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
282 "irb error %ld on device %s\n",
283 PTR_ERR(irb), dev_name(&cdev->dev));
284
285 switch (PTR_ERR(irb)) {
286 case -EIO:
287 dev_err(&cdev->dev,
288 "An I/O-error occurred on the CTCM device\n");
289 break;
290 case -ETIMEDOUT:
291 dev_err(&cdev->dev,
292 "An adapter hardware operation timed out\n");
293 break;
294 default:
295 dev_err(&cdev->dev,
296 "An error occurred on the adapter hardware\n");
297 }
298 return PTR_ERR(irb);
299}
300
301
302
303
304
305
306
307
/*
 * ccw_unit_check - decode sense byte 0 of a unit check and feed the
 *	matching event into the channel FSM.
 * @ch:    channel the unit check occurred on.
 * @sense: sense byte 0 from the IRB extended control word.
 *
 * ch->sense_rc caches the last reported condition so that each distinct
 * condition is logged only once until it changes.
 */
static inline void ccw_unit_check(struct channel *ch, __u8 sense)
{
	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): %02x",
			CTCM_FUNTAIL, ch->id, sense);

	if (sense & SNS0_INTERVENTION_REQ) {
		if (sense & 0x01) {
			/* remote side closed the channel */
			if (ch->sense_rc != 0x01) {
				pr_notice(
					"%s: The communication peer has "
					"disconnected\n", ch->id);
				ch->sense_rc = 0x01;
			}
			fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
		} else {
			/* remote side not operational */
			if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
				pr_notice(
					"%s: The remote operating system is "
					"not available\n", ch->id);
				ch->sense_rc = SNS0_INTERVENTION_REQ;
			}
			fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
		}
	} else if (sense & SNS0_EQUIPMENT_CHECK) {
		if (sense & SNS0_BUS_OUT_CHECK) {
			/* hardware failure on the remote side */
			if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
					"%s(%s): remote HW error %02x",
					CTCM_FUNTAIL, ch->id, sense);
				ch->sense_rc = SNS0_BUS_OUT_CHECK;
			}
			fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
		} else {
			/* parity error while the remote side was reading */
			if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
					"%s(%s): remote read parity error %02x",
					CTCM_FUNTAIL, ch->id, sense);
				ch->sense_rc = SNS0_EQUIPMENT_CHECK;
			}
			fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
		}
	} else if (sense & SNS0_BUS_OUT_CHECK) {
		if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
				"%s(%s): BUS OUT error %02x",
				CTCM_FUNTAIL, ch->id, sense);
			ch->sense_rc = SNS0_BUS_OUT_CHECK;
		}
		/* 0x04 distinguishes a timeout from a parity error */
		if (sense & 0x04)
			fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
		else
			fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
	} else if (sense & SNS0_CMD_REJECT) {
		if (ch->sense_rc != SNS0_CMD_REJECT) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
				"%s(%s): Command rejected",
				CTCM_FUNTAIL, ch->id);
			ch->sense_rc = SNS0_CMD_REJECT;
		}
		/* no FSM event for command reject; only logged */
	} else if (sense == 0) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
			"%s(%s): Unit check ZERO",
			CTCM_FUNTAIL, ch->id);
		fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
	} else {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
			"%s(%s): Unit check code %02x unknown",
			CTCM_FUNTAIL, ch->id, sense);
		fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
	}
}
380
/*
 * ctcm_ch_alloc_buffer - (re)allocate the transfer buffer of a channel
 *	and map it as the data address of ccw[1].
 * @ch: the channel to equip with a buffer.
 *
 * Returns 0 on success, -ENOMEM if the skb or the normalized CDA
 * (IDAL) cannot be set up.  On failure any partially allocated skb is
 * released and ch->trans_skb is left NULL.
 */
int ctcm_ch_alloc_buffer(struct channel *ch)
{
	clear_normalized_cda(&ch->ccw[1]);
	/* buffer must be DMA-capable for the channel subsystem */
	ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
	if (ch->trans_skb == NULL) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s trans_skb allocation error",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
				"RX" : "TX");
		return -ENOMEM;
	}

	/* cover the full buffer while the normalized CDA is built */
	ch->ccw[1].count = ch->max_bufsize;
	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s set norm_cda failed",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
				"RX" : "TX");
		return -ENOMEM;
	}

	/* actual transfer length is filled in per I/O request */
	ch->ccw[1].count = 0;
	ch->trans_skb_data = ch->trans_skb->data;
	ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	return 0;
}
411
412
413
414
415
416
417
418
419
420
421
422
423
424int ctcm_open(struct net_device *dev)
425{
426 struct ctcm_priv *priv = dev->ml_priv;
427
428 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
429 if (!IS_MPC(priv))
430 fsm_event(priv->fsm, DEV_EVENT_START, dev);
431 return 0;
432}
433
434
435
436
437
438
439
440
441
442int ctcm_close(struct net_device *dev)
443{
444 struct ctcm_priv *priv = dev->ml_priv;
445
446 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
447 if (!IS_MPC(priv))
448 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
449 return 0;
450}
451
452
453
454
455
456
457
458
459
460
461
462
463
/*
 * ctcm_transmit_skb - low level transmit of one packet on a plain CTC
 *	channel.
 * @ch:  the (write) channel to send on.
 * @skb: the packet; the caller holds a reference that is taken over or
 *	 released here depending on the path taken.
 *
 * If the channel is not TX-idle the packet is prepended with an LL
 * header and parked on the collect queue to be sent later as part of a
 * larger block.  Otherwise it is framed (LL header + 2-byte block
 * length) and a CCW program is started right away.
 *
 * Returns 0 on success/queued, -EBUSY if the collect queue is full,
 * -ENOMEM on allocation failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/*
	 * Channel busy: append to the collect queue unless that would
	 * overflow the transfer buffer.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);

	/*
	 * Channel idle: frame the packet (LL header plus leading 2-byte
	 * block length) and start the I/O directly.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * Buffers above 2G cannot be addressed by the channel directly;
	 * copy into a fresh GFP_DMA skb in that case.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed - fall back to copying through
		 * the pre-allocated trans_skb buffer (CCW index 0).
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/* no fallback buffer either - give up */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		/* direct transfer; keep the skb until the write completes */
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
					sizeof(struct ccw1) * 3);
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	/* ccw_device_start() requires the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * remove our framing again so the caller sees the skb
		 * unchanged
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* copy path completed; account the transmit now */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
596
597static void ctcmpc_send_sweep_req(struct channel *rch)
598{
599 struct net_device *dev = rch->netdev;
600 struct ctcm_priv *priv;
601 struct mpc_group *grp;
602 struct th_sweep *header;
603 struct sk_buff *sweep_skb;
604 struct channel *ch;
605
606
607 priv = dev->ml_priv;
608 grp = priv->mpcg;
609 ch = priv->channel[CTCM_WRITE];
610
611
612
613 if (grp->in_sweep == 0) {
614 grp->in_sweep = 1;
615 grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
616 grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
617 }
618
619 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
620
621 if (sweep_skb == NULL) {
622
623 goto nomem;
624 }
625
626 header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
627
628 if (!header) {
629 dev_kfree_skb_any(sweep_skb);
630
631 goto nomem;
632 }
633
634 header->th.th_seg = 0x00 ;
635 header->th.th_ch_flag = TH_SWEEP_REQ;
636 header->th.th_blk_flag = 0x00;
637 header->th.th_is_xid = 0x00;
638 header->th.th_seq_num = 0x00;
639 header->sw.th_last_seq = ch->th_seq_num;
640
641 memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
642
643 kfree(header);
644
645 dev->trans_start = jiffies;
646 skb_queue_tail(&ch->sweep_queue, sweep_skb);
647
648 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
649
650 return;
651
652nomem:
653 grp->in_sweep = 0;
654 ctcm_clear_busy(dev);
655 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
656
657 return;
658}
659
660
661
662
/*
 * ctcmpc_transmit_skb - low level transmit of one packet on an MPC
 *	group channel.
 * @ch:  the (write) channel to send on.
 * @skb: the packet to transmit.
 *
 * While the channel is busy or a sweep is in progress the packet is
 * wrapped in a PDU header and parked on the collect queue.  Otherwise
 * it is framed with PDU + TH headers (with a group-wide TH sequence
 * number) and a CCW program is started.
 *
 * Returns 0 on success/queued, -ENOMEM on any allocation failure (in
 * which case the skb is freed and the group is put into INOP).
 */
static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	struct pdu *p_header;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct th_header *header;
	struct sk_buff *nskb;
	int rc = 0;
	int ccw_idx;
	unsigned long hi;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
			__func__, dev->name, smp_processor_id(), ch,
					ch->id, fsm_getstate_str(ch->fsm));

	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
		/* busy or sweeping: queue on the collect queue */
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		atomic_inc(&skb->users);
		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

		if (!p_header) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			goto nomem_exit;
		}

		p_header->pdu_offset = skb->len;
		p_header->pdu_proto = 0x01;
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP)) {
			p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
		} else {
			p_header->pdu_flag |= PDU_FIRST;
		}
		p_header->pdu_seq = 0;
		memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
		       PDU_HEADER_LENGTH);

		CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
				"pdu header and data for up to 32 bytes:\n",
				__func__, dev->name, skb->len);
		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;
		kfree(p_header);

		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}

	/*
	 * Channel idle: protect against shared-buffer contention by
	 * copying if the buffer sits above the 2G line.
	 */
	atomic_inc(&skb->users);

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			goto nomem_exit;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	/* prepend the PDU header */
	p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

	if (!p_header)
		goto nomem_exit;

	p_header->pdu_offset = skb->len;
	p_header->pdu_proto = 0x01;
	p_header->pdu_flag = 0x00;
	p_header->pdu_seq = 0;
	if (skb->protocol == ntohs(ETH_P_SNAP)) {
		p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
	} else {
		p_header->pdu_flag |= PDU_FIRST;
	}
	memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);

	kfree(p_header);

	/*
	 * If packets have collected meanwhile, send the oldest queued
	 * packet first and re-queue this one.
	 */
	if (ch->collect_len > 0) {
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;
		skb = skb_dequeue(&ch->collect_queue);
		ch->collect_len -= skb->len;
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	}

	p_header = (struct pdu *)skb->data;
	p_header->pdu_flag |= PDU_LAST;

	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;

	/* prepend the TH header carrying the sequence number */
	header = kmalloc(TH_HEADER_LENGTH, gfp_type());
	if (!header)
		goto nomem_exit;

	header->th_seg = 0x00;
	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
	header->th_blk_flag = 0x00;
	header->th_is_xid = 0x00;          /* Just data here */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
		       __func__, dev->name, ch->th_seq_num);

	/* put the TH on the packet */
	memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);

	kfree(header);

	CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
			"up to 32 bytes sent to vtam:\n",
				__func__, dev->name, skb->len);
	CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

	ch->ccw[4].count = skb->len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed - fall back to copying through
		 * the pre-allocated trans_skb buffer (CCW index 0).
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/* no fallback buffer either - give up */
			goto nomem_exit;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
				"up to 32 bytes sent to vtam:\n",
				__func__, dev->name, ch->trans_skb->len);
		CTCM_D3_DUMP((char *)ch->trans_skb->data,
				min_t(int, 32, ch->trans_skb->len));
	} else {
		/* direct transfer; keep the skb until the write completes */
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
					sizeof(struct ccw1) * 3);

	/* ccw_device_start() requires the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
	} else if (ccw_idx == 0) {
		/* copy path completed; account the transmit now */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
	}
	/* start a sweep before the sequence number can wrap */
	if (ch->th_seq_num > 0xf0000000)
		ctcmpc_send_sweep_req(ch);

	goto done;
nomem_exit:
	CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
			"%s(%s): MEMORY allocation ERROR\n",
			CTCM_FUNTAIL, ch->id);
	rc = -ENOMEM;
	atomic_dec(&skb->users);
	dev_kfree_skb_any(skb);
	fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
	CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
	return rc;
}
865
866
867
868
869
870
871
872
873
874
875
876
877
/*
 * ctcm_tx - netdev start_xmit callback for plain CTC interfaces.
 * @skb: packet handed down by the stack.
 * @dev: the interface.
 *
 * Validates the skb, makes sure the device FSM is running, serializes
 * transmits via the device busy flag and delegates the real work to
 * ctcm_transmit_skb() on the write channel.
 *
 * Returns NETDEV_TX_OK (also for dropped packets) or NETDEV_TX_BUSY to
 * have the stack retry the packet later.
 */
static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ctcm_priv *priv = dev->ml_priv;

	if (skb == NULL) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
				"%s(%s): NULL sk_buff passed",
					CTCM_FUNTAIL, dev->name);
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* transmit path pushes an LL header + 2-byte block length */
	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): Got sk_buff with head room < %ld bytes",
			CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If channels are not running, try to restart them and drop the
	 * packet.
	 */
	if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
		fsm_event(priv->fsm, DEV_EVENT_START, dev);
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	/* one transmit at a time; busy flag is cleared by the TX path */
	if (ctcm_test_and_set_busy(dev))
		return NETDEV_TX_BUSY;

	dev->trans_start = jiffies;
	if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}
919
920
/*
 * ctcmpc_tx - netdev start_xmit callback for MPC interfaces.
 * @skb: packet handed down by the stack.
 * @dev: the interface.
 *
 * Like ctcm_tx() but for MPC groups: the required headroom is
 * TH + PDU header; if it is missing the packet is copied into a fresh
 * skb with proper reserve instead of being dropped.  Any hard failure
 * drops the packet and puts the MPC group into INOP.
 *
 * Always returns NETDEV_TX_OK; MPC never asks the stack to retry.
 */
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *newskb = NULL;

	/*
	 * Some sanity checks...
	 */
	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): NULL sk_buff passed",
					CTCM_FUNTAIL, dev->name);
		priv->stats.tx_dropped++;
		goto done;
	}
	if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s(%s): Got sk_buff with head room < %ld bytes",
			CTCM_FUNTAIL, dev->name,
				TH_HEADER_LENGTH + PDU_HEADER_LENGTH);

		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

		/* re-allocate with enough headroom rather than dropping */
		len =  skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
		newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);

		if (!newskb) {
			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
				"%s: %s: __dev_alloc_skb failed",
						__func__, dev->name);

			dev_kfree_skb_any(skb);
			priv->stats.tx_dropped++;
			priv->stats.tx_errors++;
			priv->stats.tx_carrier_errors++;
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			goto done;
		}
		newskb->protocol = skb->protocol;
		skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
		memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = newskb;
	}

	/*
	 * If channels are not running, simply drop the packet.  With
	 * MPC, autorestart is not possible: VTAM cannot tolerate device
	 * going down, so the group goes INOP instead.
	 */
	if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
	   (fsm_getstate(grp->fsm) <  MPCG_STATE_XID2INITW)) {
		dev_kfree_skb_any(skb);
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): inactive MPCGROUP - dropped",
					CTCM_FUNTAIL, dev->name);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		goto done;
	}

	if (ctcm_test_and_set_busy(dev)) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): device busy - dropped",
					CTCM_FUNTAIL, dev->name);
		dev_kfree_skb_any(skb);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	dev->trans_start = jiffies;
	if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): device error - dropped",
					CTCM_FUNTAIL, dev->name);
		dev_kfree_skb_any(skb);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		ctcm_clear_busy(dev);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	ctcm_clear_busy(dev);
done:
	if (do_debug)
		MPC_DBF_DEV_NAME(TRACE, dev, "exit");

	return NETDEV_TX_OK;	/* handle freeing of skb here */
}
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
1031{
1032 struct ctcm_priv *priv;
1033 int max_bufsize;
1034
1035 if (new_mtu < 576 || new_mtu > 65527)
1036 return -EINVAL;
1037
1038 priv = dev->ml_priv;
1039 max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
1040
1041 if (IS_MPC(priv)) {
1042 if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
1043 return -EINVAL;
1044 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1045 } else {
1046 if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
1047 return -EINVAL;
1048 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1049 }
1050 dev->mtu = new_mtu;
1051 return 0;
1052}
1053
1054
1055
1056
1057
1058
1059
1060
1061static struct net_device_stats *ctcm_stats(struct net_device *dev)
1062{
1063 return &((struct ctcm_priv *)dev->ml_priv)->stats;
1064}
1065
/*
 * ctcm_free_netdevice - tear down everything hanging off a CTCM
 *	net_device: MPC group (if any), device FSM, XID data, and
 *	finally the net_device itself.
 * @dev: the device to destroy.
 */
static void ctcm_free_netdevice(struct net_device *dev)
{
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
			"%s(%s)", CTCM_FUNTAIL, dev->name);
	priv = dev->ml_priv;
	if (priv) {
		grp = priv->mpcg;
		if (grp) {
			if (grp->fsm)
				kfree_fsm(grp->fsm);
			if (grp->xid_skb)
				dev_kfree_skb(grp->xid_skb);
			if (grp->rcvd_xid_skb)
				dev_kfree_skb(grp->rcvd_xid_skb);
			tasklet_kill(&grp->mpc_tasklet2);
			kfree(grp);
			priv->mpcg = NULL;
		}
		if (priv->fsm) {
			kfree_fsm(priv->fsm);
			priv->fsm = NULL;
		}
		kfree(priv->xid);
		priv->xid = NULL;
		/*
		 * Note: kfree(priv); is done in "opposite" function of
		 * allocator function probe_device which is remove_device.
		 */
	}
	/* NOTE(review): free_netdev only when built as a module - confirm
	 * the built-in case releases the netdev elsewhere */
#ifdef MODULE
	free_netdev(dev);
#endif
}
1102
/* defined elsewhere (not in this chunk) */
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);

/* net_device callbacks for plain CTC interfaces */
static const struct net_device_ops ctcm_netdev_ops = {
	.ndo_open		= ctcm_open,
	.ndo_stop		= ctcm_close,
	.ndo_get_stats		= ctcm_stats,
	.ndo_change_mtu	   	= ctcm_change_mtu,
	.ndo_start_xmit		= ctcm_tx,
};

/* net_device callbacks for MPC interfaces (different xmit path) */
static const struct net_device_ops ctcm_mpc_netdev_ops = {
	.ndo_open		= ctcm_open,
	.ndo_stop		= ctcm_close,
	.ndo_get_stats		= ctcm_stats,
	.ndo_change_mtu	   	= ctcm_change_mtu,
	.ndo_start_xmit		= ctcmpc_tx,
};
1120
1121void static ctcm_dev_setup(struct net_device *dev)
1122{
1123 dev->type = ARPHRD_SLIP;
1124 dev->tx_queue_len = 100;
1125 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1126}
1127
1128
1129
1130
1131
/*
 * ctcm_init_netdevice - allocate and initialize a net_device for the
 *	given private data, including the device FSM and (for MPC) the
 *	MPC group state.
 * @priv: the device's private data; must not be NULL.
 *
 * Returns the initialized net_device, or NULL on any allocation
 * failure (in which case all partial allocations are rolled back).
 */
static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
{
	struct net_device *dev;
	struct mpc_group *grp;
	if (!priv)
		return NULL;

	/* name template differs between MPC and plain CTC devices */
	if (IS_MPC(priv))
		dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
	else
		dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);

	if (!dev) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
			"%s: MEMORY allocation ERROR",
			CTCM_FUNTAIL);
		return NULL;
	}
	dev->ml_priv = priv;
	priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
				CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
				dev_fsm, dev_fsm_len, GFP_KERNEL);
	if (priv->fsm == NULL) {
		CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
		free_netdev(dev);
		return NULL;
	}
	fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
	fsm_settimer(priv->fsm, &priv->restart_timer);

	if (IS_MPC(priv)) {
		/* MPC: set up the group, its tasklet and MPC-sized MTU */
		grp = ctcmpc_init_mpc_group(priv);
		if (grp == NULL) {
			MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
			free_netdev(dev);
			return NULL;
		}
		tasklet_init(&grp->mpc_tasklet2,
				mpc_group_ready, (unsigned long)dev);
		dev->mtu = MPC_BUFSIZE_DEFAULT -
				TH_HEADER_LENGTH - PDU_HEADER_LENGTH;

		dev->netdev_ops = &ctcm_mpc_netdev_ops;
		dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
		priv->buffer_size = MPC_BUFSIZE_DEFAULT;
	} else {
		/* plain CTC: LL framing overhead only */
		dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
		dev->netdev_ops = &ctcm_netdev_ops;
		dev->hard_header_len = LL_HEADER_LENGTH + 2;
	}

	CTCMY_DBF_DEV(SETUP, dev, "finished");

	return dev;
}
1188
1189
1190
1191
1192
1193
1194
1195
/*
 * ctcm_irq_handler - main interrupt routine; dispatches CCW interrupts
 *	as events to the owning channel's FSM.
 * @cdev:    ccw device the interrupt arrived on.
 * @intparm: interruption parameter (unused here; the channel is found
 *	     via the driver data instead).
 * @irb:     interruption response block, or an ERR_PTR.
 */
static void ctcm_irq_handler(struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	struct channel *ch;
	struct net_device *dev;
	struct ctcm_priv *priv;
	struct ccwgroup_device *cgdev;
	int cstat;
	int dstat;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
		"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));

	/* ERR_PTR irb means the channel subsystem reported a failure */
	if (ctcm_check_irb_error(cdev, irb))
		return;

	cgdev = dev_get_drvdata(&cdev->dev);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	/* Check for unsolicited interrupts. */
	if (cgdev == NULL) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
			"%s(%s) unsolicited irq: c-%02x d-%02x\n",
			CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
		dev_warn(&cdev->dev,
			"The adapter received a non-specific IRQ\n");
		return;
	}

	priv = dev_get_drvdata(&cgdev->dev);

	/* Try to extract channel from driver data. */
	if (priv->channel[CTCM_READ]->cdev == cdev)
		ch = priv->channel[CTCM_READ];
	else if (priv->channel[CTCM_WRITE]->cdev == cdev)
		ch = priv->channel[CTCM_WRITE];
	else {
		dev_err(&cdev->dev,
			"%s: Internal error: Can't determine channel for "
			"interrupt device %s\n",
			__func__, dev_name(&cdev->dev));
			/* Explain: inconsistent internal structures */
		return;
	}

	dev = ch->netdev;
	if (dev == NULL) {
		dev_err(&cdev->dev,
			"%s Internal error: net_device is NULL, ch = 0x%p\n",
			__func__, ch);
			/* Explain: inconsistent internal structures */
		return;
	}

	/* Copy interruption response block. */
	memcpy(ch->irb, irb, sizeof(struct irb));

	/* Issue error message and return on subchannel error code */
	if (irb->scsw.cmd.cstat) {
		fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
			"%s(%s): sub-ch check %s: cs=%02x ds=%02x",
				CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
		dev_warn(&cdev->dev,
				"A check occurred on the subchannel\n");
		return;
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
		/* only log if this sense condition was not reported before */
		if ((irb->ecw[0] & ch->sense_rc) == 0)
			/* print it only once */
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
				"%s(%s): sense=%02x, ds=%02x",
				CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
		ccw_unit_check(ch, irb->ecw[0]);
		return;
	}
	if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
		if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
			fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
		else
			fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
		return;
	}
	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
		fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
		return;
	}
	/* secondary/pending/alert status all signal I/O completion */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
	    (irb->scsw.cmd.stctl ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
		fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
	else
		fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);

}
1296
/* device type providing the sysfs attribute groups for CTCM devices */
static const struct device_type ctcm_devtype = {
	.name = "ctcm",
	.groups = ctcm_attr_groups,
};
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1311{
1312 struct ctcm_priv *priv;
1313
1314 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1315 "%s %p",
1316 __func__, cgdev);
1317
1318 if (!get_device(&cgdev->dev))
1319 return -ENODEV;
1320
1321 priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
1322 if (!priv) {
1323 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1324 "%s: memory allocation failure",
1325 CTCM_FUNTAIL);
1326 put_device(&cgdev->dev);
1327 return -ENOMEM;
1328 }
1329 priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1330 cgdev->cdev[0]->handler = ctcm_irq_handler;
1331 cgdev->cdev[1]->handler = ctcm_irq_handler;
1332 dev_set_drvdata(&cgdev->dev, priv);
1333 cgdev->dev.type = &ctcm_devtype;
1334
1335 return 0;
1336}
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1349 struct ctcm_priv *priv)
1350{
1351 struct channel **c = &channels;
1352 struct channel *ch;
1353 int ccw_num;
1354 int rc = 0;
1355
1356 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1357 "%s(%s), type %d, proto %d",
1358 __func__, dev_name(&cdev->dev), type, priv->protocol);
1359
1360 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1361 if (ch == NULL)
1362 return -ENOMEM;
1363
1364 ch->protocol = priv->protocol;
1365 if (IS_MPC(priv)) {
1366 ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
1367 if (ch->discontact_th == NULL)
1368 goto nomem_return;
1369
1370 ch->discontact_th->th_blk_flag = TH_DISCONTACT;
1371 tasklet_init(&ch->ch_disc_tasklet,
1372 mpc_action_send_discontact, (unsigned long)ch);
1373
1374 tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
1375 ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
1376 ccw_num = 17;
1377 } else
1378 ccw_num = 8;
1379
1380 ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1381 if (ch->ccw == NULL)
1382 goto nomem_return;
1383
1384 ch->cdev = cdev;
1385 snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
1386 ch->type = type;
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1432 ch->ccw[6].flags = CCW_FLAG_SLI;
1433
1434 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1435 ch->ccw[7].flags = CCW_FLAG_SLI;
1436
1437 if (IS_MPC(priv)) {
1438 ch->ccw[15].cmd_code = CCW_CMD_WRITE;
1439 ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1440 ch->ccw[15].count = TH_HEADER_LENGTH;
1441 ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
1442
1443 ch->ccw[16].cmd_code = CCW_CMD_NOOP;
1444 ch->ccw[16].flags = CCW_FLAG_SLI;
1445
1446 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1447 ctc_ch_event_names, CTC_MPC_NR_STATES,
1448 CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
1449 mpc_ch_fsm_len, GFP_KERNEL);
1450 } else {
1451 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1452 ctc_ch_event_names, CTC_NR_STATES,
1453 CTC_NR_EVENTS, ch_fsm,
1454 ch_fsm_len, GFP_KERNEL);
1455 }
1456 if (ch->fsm == NULL)
1457 goto nomem_return;
1458
1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1460
1461 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1462 if (ch->irb == NULL)
1463 goto nomem_return;
1464
1465 while (*c && ctcm_less_than((*c)->id, ch->id))
1466 c = &(*c)->next;
1467
1468 if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
1469 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1470 "%s (%s) already in list, using old entry",
1471 __func__, (*c)->id);
1472
1473 goto free_return;
1474 }
1475
1476 spin_lock_init(&ch->collect_lock);
1477
1478 fsm_settimer(ch->fsm, &ch->timer);
1479 skb_queue_head_init(&ch->io_queue);
1480 skb_queue_head_init(&ch->collect_queue);
1481
1482 if (IS_MPC(priv)) {
1483 fsm_settimer(ch->fsm, &ch->sweep_timer);
1484 skb_queue_head_init(&ch->sweep_queue);
1485 }
1486 ch->next = *c;
1487 *c = ch;
1488 return 0;
1489
1490nomem_return:
1491 rc = -ENOMEM;
1492
1493free_return:
1494 kfree(ch->ccw);
1495 kfree(ch->discontact_th);
1496 kfree_fsm(ch->fsm);
1497 kfree(ch->irb);
1498 kfree(ch);
1499 return rc;
1500}
1501
1502
1503
1504
1505static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
1506{
1507 enum ctcm_channel_types type;
1508 type = (enum ctcm_channel_types)id->driver_info;
1509
1510 if (type == ctcm_channel_type_ficon)
1511 type = ctcm_channel_type_escon;
1512
1513 return type;
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524static int ctcm_new_device(struct ccwgroup_device *cgdev)
1525{
1526 char read_id[CTCM_ID_SIZE];
1527 char write_id[CTCM_ID_SIZE];
1528 int direction;
1529 enum ctcm_channel_types type;
1530 struct ctcm_priv *priv;
1531 struct net_device *dev;
1532 struct ccw_device *cdev0;
1533 struct ccw_device *cdev1;
1534 struct channel *readc;
1535 struct channel *writec;
1536 int ret;
1537 int result;
1538
1539 priv = dev_get_drvdata(&cgdev->dev);
1540 if (!priv) {
1541 result = -ENODEV;
1542 goto out_err_result;
1543 }
1544
1545 cdev0 = cgdev->cdev[0];
1546 cdev1 = cgdev->cdev[1];
1547
1548 type = get_channel_type(&cdev0->id);
1549
1550 snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
1551 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
1552
1553 ret = add_channel(cdev0, type, priv);
1554 if (ret) {
1555 result = ret;
1556 goto out_err_result;
1557 }
1558 ret = add_channel(cdev1, type, priv);
1559 if (ret) {
1560 result = ret;
1561 goto out_remove_channel1;
1562 }
1563
1564 ret = ccw_device_set_online(cdev0);
1565 if (ret != 0) {
1566 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1567 "%s(%s) set_online rc=%d",
1568 CTCM_FUNTAIL, read_id, ret);
1569 result = -EIO;
1570 goto out_remove_channel2;
1571 }
1572
1573 ret = ccw_device_set_online(cdev1);
1574 if (ret != 0) {
1575 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1576 "%s(%s) set_online rc=%d",
1577 CTCM_FUNTAIL, write_id, ret);
1578
1579 result = -EIO;
1580 goto out_ccw1;
1581 }
1582
1583 dev = ctcm_init_netdevice(priv);
1584 if (dev == NULL) {
1585 result = -ENODEV;
1586 goto out_ccw2;
1587 }
1588
1589 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
1590 priv->channel[direction] =
1591 channel_get(type, direction == CTCM_READ ?
1592 read_id : write_id, direction);
1593 if (priv->channel[direction] == NULL) {
1594 if (direction == CTCM_WRITE)
1595 channel_free(priv->channel[CTCM_READ]);
1596 goto out_dev;
1597 }
1598 priv->channel[direction]->netdev = dev;
1599 priv->channel[direction]->protocol = priv->protocol;
1600 priv->channel[direction]->max_bufsize = priv->buffer_size;
1601 }
1602
1603 SET_NETDEV_DEV(dev, &cgdev->dev);
1604
1605 if (register_netdev(dev)) {
1606 result = -ENODEV;
1607 goto out_dev;
1608 }
1609
1610 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
1611
1612 dev_info(&dev->dev,
1613 "setup OK : r/w = %s/%s, protocol : %d\n",
1614 priv->channel[CTCM_READ]->id,
1615 priv->channel[CTCM_WRITE]->id, priv->protocol);
1616
1617 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1618 "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
1619 priv->channel[CTCM_READ]->id,
1620 priv->channel[CTCM_WRITE]->id, priv->protocol);
1621
1622 return 0;
1623out_dev:
1624 ctcm_free_netdevice(dev);
1625out_ccw2:
1626 ccw_device_set_offline(cgdev->cdev[1]);
1627out_ccw1:
1628 ccw_device_set_offline(cgdev->cdev[0]);
1629out_remove_channel2:
1630 readc = channel_get(type, read_id, CTCM_READ);
1631 channel_remove(readc);
1632out_remove_channel1:
1633 writec = channel_get(type, write_id, CTCM_WRITE);
1634 channel_remove(writec);
1635out_err_result:
1636 return result;
1637}
1638
1639
1640
1641
1642
1643
1644
1645
/*
 * ctcm_shutdown_device - ccwgroup set_offline callback.
 *
 * Closes the interface, releases both channels, unregisters and frees
 * the net_device, destroys the device fsm and sets both subchannels
 * offline.  Note the ordering: channels are freed (unbound) before the
 * netdev is unregistered, and removed from the global list only after
 * the subchannels are offline.
 *
 * Returns 0 on success, -ENODEV if no private data is attached.
 */
static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
{
	struct ctcm_priv *priv;
	struct net_device *dev;

	priv = dev_get_drvdata(&cgdev->dev);
	if (!priv)
		return -ENODEV;

	if (priv->channel[CTCM_READ]) {
		dev = priv->channel[CTCM_READ]->netdev;
		CTCM_DBF_DEV(SETUP, dev, "");
		/* Stop traffic and give up the read channel. */
		ctcm_close(dev);
		dev->flags &= ~IFF_RUNNING;
		channel_free(priv->channel[CTCM_READ]);
	} else
		dev = NULL;

	if (priv->channel[CTCM_WRITE])
		channel_free(priv->channel[CTCM_WRITE]);

	if (dev) {
		unregister_netdev(dev);
		ctcm_free_netdevice(dev);
	}

	if (priv->fsm)
		kfree_fsm(priv->fsm);

	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	/* Drop the channels from the global list last, once no more
	 * interrupts can arrive on the subchannels. */
	if (priv->channel[CTCM_READ])
		channel_remove(priv->channel[CTCM_READ]);
	if (priv->channel[CTCM_WRITE])
		channel_remove(priv->channel[CTCM_WRITE]);
	priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;

	return 0;

}
1688
1689
/*
 * ctcm_remove_device - ccwgroup remove callback; undoes ctcm_probe_device.
 *
 * Shuts the device down first if it is still online, then frees the
 * private data and drops the device reference taken at probe time.
 *
 * NOTE(review): priv is dereferenced in the trace below without a NULL
 * check - presumably remove is only called after a successful probe;
 * verify against the ccwgroup core.
 */
static void ctcm_remove_device(struct ccwgroup_device *cgdev)
{
	struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
			"removing device %p, proto : %d",
			cgdev, priv->protocol);

	if (cgdev->state == CCWGROUP_ONLINE)
		ctcm_shutdown_device(cgdev);
	dev_set_drvdata(&cgdev->dev, NULL);
	kfree(priv);
	put_device(&cgdev->dev);
}
1704
/*
 * ctcm_pm_suspend - PM freeze callback.
 *
 * Detaches and closes the interface, waits (up to 5 seconds) for the
 * device fsm to reach STOPPED, then sets both subchannels offline.
 * If the fsm does not stop in time, the netdev is re-attached and
 * -EBUSY is returned, aborting the suspend.
 */
static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
{
	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);

	/* Nothing to quiesce if the group device is offline. */
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	netif_device_detach(priv->channel[CTCM_READ]->netdev);
	ctcm_close(priv->channel[CTCM_READ]->netdev);
	if (!wait_event_timeout(priv->fsm->wait_q,
	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
		netif_device_attach(priv->channel[CTCM_READ]->netdev);
		return -EBUSY;
	}
	ccw_device_set_offline(gdev->cdev[1]);
	ccw_device_set_offline(gdev->cdev[0]);
	return 0;
}
1722
/*
 * ctcm_pm_resume - PM thaw/restore callback.
 *
 * Sets both subchannels online again and reopens the interface.  The
 * netdev is re-attached even on error, so the network stack regains
 * control of the (then non-functional) interface.
 */
static int ctcm_pm_resume(struct ccwgroup_device *gdev)
{
	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
	int rc;

	/* Nothing was frozen if the group device was offline. */
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	rc = ccw_device_set_online(gdev->cdev[1]);
	if (rc)
		goto err_out;
	rc = ccw_device_set_online(gdev->cdev[0]);
	if (rc)
		goto err_out;
	/* Return value of ctcm_open is deliberately ignored here. */
	ctcm_open(priv->channel[CTCM_READ]->netdev);
err_out:
	netif_device_attach(priv->channel[CTCM_READ]->netdev);
	return rc;
}
1741
/* Supported control-unit type/model combinations (CU type 0x3088);
 * driver_info carries the channel type for get_channel_type(). */
static struct ccw_device_id ctcm_ids[] = {
	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
	{},
};
MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1749
/* Low-level ccw driver for the individual subchannels; probe/remove
 * are delegated to the ccwgroup core helpers. */
static struct ccw_driver ctcm_ccw_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "ctcm",
	},
	.ids	= ctcm_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
	.int_class = IRQIO_CTC,
};
1760
/* Group driver tying the read/write subchannel pair to one logical
 * CTCM device; callbacks implement the device life cycle and PM. */
static struct ccwgroup_driver ctcm_group_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= CTC_DRIVER_NAME,
	},
	.setup	     = ctcm_probe_device,
	.remove      = ctcm_remove_device,
	.set_online  = ctcm_new_device,
	.set_offline = ctcm_shutdown_device,
	.freeze	     = ctcm_pm_suspend,
	.thaw	     = ctcm_pm_resume,
	.restore     = ctcm_pm_resume,
};
1774
1775static ssize_t ctcm_driver_group_store(struct device_driver *ddrv,
1776 const char *buf, size_t count)
1777{
1778 int err;
1779
1780 err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
1781 return err ? err : count;
1782}
1783static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1784
1785static struct attribute *ctcm_drv_attrs[] = {
1786 &driver_attr_group.attr,
1787 NULL,
1788};
1789static struct attribute_group ctcm_drv_attr_group = {
1790 .attrs = ctcm_drv_attrs,
1791};
1792static const struct attribute_group *ctcm_drv_attr_groups[] = {
1793 &ctcm_drv_attr_group,
1794 NULL,
1795};
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
/*
 * Module exit: unregister the group and ccw drivers and the root
 * device, then release the debug facility views - the exact reverse
 * of the registration order in ctcm_init().
 */
static void __exit ctcm_exit(void)
{
	ccwgroup_driver_unregister(&ctcm_group_driver);
	ccw_driver_unregister(&ctcm_ccw_driver);
	root_device_unregister(ctcm_root_dev);
	ctcm_unregister_dbf_views();
	pr_info("CTCM driver unloaded\n");
}
1815
1816
1817
1818
/*
 * Print the startup banner once module initialization has succeeded.
 */
static void print_banner(void)
{
	pr_info("CTCM driver initialized\n");
}
1823
1824
1825
1826
1827
1828
1829
1830static int __init ctcm_init(void)
1831{
1832 int ret;
1833
1834 channels = NULL;
1835
1836 ret = ctcm_register_dbf_views();
1837 if (ret)
1838 goto out_err;
1839 ctcm_root_dev = root_device_register("ctcm");
1840 ret = PTR_RET(ctcm_root_dev);
1841 if (ret)
1842 goto register_err;
1843 ret = ccw_driver_register(&ctcm_ccw_driver);
1844 if (ret)
1845 goto ccw_err;
1846 ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
1847 ret = ccwgroup_driver_register(&ctcm_group_driver);
1848 if (ret)
1849 goto ccwgroup_err;
1850 print_banner();
1851 return 0;
1852
1853ccwgroup_err:
1854 ccw_driver_unregister(&ctcm_ccw_driver);
1855ccw_err:
1856 root_device_unregister(ctcm_root_dev);
1857register_err:
1858 ctcm_unregister_dbf_views();
1859out_err:
1860 pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1861 __func__, ret);
1862 return ret;
1863}
1864
1865module_init(ctcm_init);
1866module_exit(ctcm_exit);
1867
1868MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
1869MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
1870MODULE_LICENSE("GPL");
1871
1872