/*
 * mcs7780.c - IrDA-USB dongle driver for the MosChip MCS7780
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>

#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>

#include "mcs7780.h"

#define MCS_VENDOR_ID 0x9710
#define MCS_PRODUCT_ID 0x7780

static struct usb_device_id mcs_table[] = {
	{USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)},
	{},
};

MODULE_AUTHOR("Brian Pugh <bpugh@cs.pdx.edu>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for MosChip MCS7780");
MODULE_VERSION("0.3alpha");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(usb, mcs_table);

static int qos_mtt_bits = 0x07;
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");

static int receive_mode = 0x1;
module_param(receive_mode, int, 0);
MODULE_PARM_DESC(receive_mode,
		 "Receive mode of the device (1:fast, 0:slow, default:1)");

static int sir_tweak = 1;
module_param(sir_tweak, int, 0444);
MODULE_PARM_DESC(sir_tweak,
		 "Default pulse width (1:1.6us, 0:3/16 bit, default:1).");

static int transceiver_type = MCS_TSC_VISHAY;
module_param(transceiver_type, int, 0444);
MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h.");

static struct usb_driver mcs_driver = {
	.name = "mcs7780",
	.probe = mcs_probe,
	.disconnect = mcs_disconnect,
	.id_table = mcs_table,
};

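/* Hardware speed-select bits for the mode register, indexed by
 * (link speed >> 8) & 0x0f; entries left at zero are unused. */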
static __u16 mcs_speed_set[16] = { 0,
	MCS_SPEED_57600,
	MCS_SPEED_115200,
	0,
	MCS_SPEED_1152000,
	MCS_SPEED_9600,
	MCS_SPEED_38400,
	0, 0,
	MCS_SPEED_2400,
	MCS_SPEED_576000,
	MCS_SPEED_19200,
	0, 0, 0,
};

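/* Write a 16 bit device register via a USB control message. */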
static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
{
	struct usb_device *dev = mcs->usbdev;
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
			       MCS_WR_RTYPE, val, reg, NULL, 0,
			       msecs_to_jiffies(MCS_CTRL_TIMEOUT));
}

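/* Read a 16 bit device register.  Returns the number of bytes transferred
 * (2 on success) or a negative error code. */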
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 *val)
{
	struct usb_device *dev = mcs->usbdev;
	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
				  MCS_RD_RTYPE, 0, reg, val, 2,
				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));

	return ret;
}

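/* Transceiver-specific setup for the Vishay part: pulse the configuration
 * bits (MCS_XCVR_CONF/MCS_MODE0) in the XCVR register in the required
 * sequence.  Returns 0 on success. */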
static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
{
	int ret = 0;
	__u16 rval;

	ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
	if (unlikely(ret != 2)) {
		ret = -EIO;
		goto error;
	}

	rval |= (MCS_MODE0 | MCS_XCVR_CONF);
	rval &= ~MCS_STFIR;
	rval &= ~MCS_MODE1;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	rval &= ~MCS_MODE0;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	rval &= ~MCS_XCVR_CONF;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	ret = 0;
error:
	return ret;
}

static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
{
	net_warn_ratelimited("This transceiver type is not supported yet\n");
	return 1;
}

static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
{
	net_warn_ratelimited("This transceiver type is not supported yet\n");
	return 1;
}

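/* Common setup shared by all transceiver types: program the mode register
 * for SIR at 9600 baud, run the model specific setup, and finally take the
 * device out of reset.  Returns 0 on success. */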
static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
{
	int ret = 0;
	__u16 rval;
	const char *msg;

	msg = "Basic transceiver setup error";

	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if (unlikely(ret != 2))
		goto error;
	rval |= MCS_DRIVER;
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;

	rval = 0;
	ret = mcs_set_reg(mcs, MCS_MINRXPW_REG, rval);
	if (unlikely(ret))
		goto error;

	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if (unlikely(ret != 2))
		goto error;

	rval &= ~MCS_FIR;
	if (mcs->sir_tweak)
		rval |= MCS_SIR16US;
	else
		rval &= ~MCS_SIR16US;

	rval &= ~(MCS_BBTG | MCS_ASK);

	rval &= ~MCS_SPEED_MASK;
	rval |= MCS_SPEED_9600;
	mcs->speed = 9600;
	mcs->new_speed = 0;
	rval &= ~MCS_PLLPWDN;

	rval |= MCS_DTD | MCS_SIPEN;

	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;

	msg = "transceiver model specific setup error";
	switch (mcs->transceiver_type) {
	case MCS_TSC_VISHAY:
		ret = mcs_setup_transceiver_vishay(mcs);
		break;

	case MCS_TSC_SHARP:
		ret = mcs_setup_transceiver_sharp(mcs);
		break;

	case MCS_TSC_AGILENT:
		ret = mcs_setup_transceiver_agilent(mcs);
		break;

	default:
		net_warn_ratelimited("Unknown transceiver type: %d\n",
				     mcs->transceiver_type);
		ret = 1;
	}
	if (unlikely(ret))
		goto error;

	if (mcs->transceiver_type != MCS_TSC_SHARP) {
		ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
		if (unlikely(ret != 2))
			goto error;
		if (mcs->receive_mode)
			rval |= MCS_RXFAST;
		else
			rval &= ~MCS_RXFAST;
		ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
		if (unlikely(ret))
			goto error;
	}

	msg = "transceiver reset";

	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if (unlikely(ret != 2))
		goto error;

	rval &= ~MCS_RESET;
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;
	else
		return ret;

error:
	net_err_ratelimited("%s\n", msg);
	return ret;
}

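/* Wrap an IrDA SIR frame: async-wrap the skb payload into buf + 2 and store
 * the total buffer length as a 16 bit little-endian header in buf[0..1].
 * Returns the number of bytes to send. */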
static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
{
	int wraplen;

	wraplen = async_wrap_skb(skb, buf + 2, 4094);

	wraplen += 2;
	buf[0] = wraplen & 0xff;
	buf[1] = (wraplen >> 8) & 0xff;

	return wraplen;
}

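/* Wrap an IrDA FIR frame: 16 bit little-endian length header, payload copied
 * from the skb, and a trailing 32 bit FCS (little-endian CRC32). */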
static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
{
	unsigned int len = 0;
	__u32 fcs = ~(crc32_le(~0, skb->data, skb->len));

	len = skb->len + 6;

	buf[0] = len & 0xff;
	buf[1] = (len >> 8) & 0xff;

	skb_copy_from_linear_data(skb, buf + 2, skb->len);

	buf[len - 4] = fcs & 0xff;
	buf[len - 3] = (fcs >> 8) & 0xff;
	buf[len - 2] = (fcs >> 16) & 0xff;
	buf[len - 1] = (fcs >> 24) & 0xff;

	return len;
}

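/* Wrap an IrDA MIR frame: 16 bit little-endian length header, payload copied
 * from the skb, and a trailing 16 bit FCS (CRC16). */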
static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
{
	__u16 fcs = 0;
	int len = skb->len + 4;

	fcs = ~(irda_calc_crc16(~fcs, skb->data, skb->len));

	buf[0] = len & 0xff;
	buf[1] = (len >> 8) & 0xff;

	skb_copy_from_linear_data(skb, buf + 2, skb->len);

	buf[len - 2] = fcs & 0xff;
	buf[len - 1] = (fcs >> 8) & 0xff;

	return len;
}

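/* Unwrap a received MIR frame: check the CRC16 over the whole buffer and,
 * if it is good, pass the payload (without the FCS) up as a new skb. */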
static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
{
	__u16 fcs;
	int new_len;
	struct sk_buff *skb;

	new_len = len - 2;
	if (unlikely(new_len <= 0)) {
		net_err_ratelimited("%s short frame length %d\n",
				    mcs->netdev->name, new_len);
		++mcs->netdev->stats.rx_errors;
		++mcs->netdev->stats.rx_length_errors;
		return;
	}

	fcs = 0;
	fcs = irda_calc_crc16(~fcs, buf, len);

	if (fcs != GOOD_FCS) {
		net_err_ratelimited("crc error calc 0x%x len %d\n",
				    fcs, new_len);
		mcs->netdev->stats.rx_errors++;
		mcs->netdev->stats.rx_crc_errors++;
		return;
	}

	skb = dev_alloc_skb(new_len + 1);
	if (unlikely(!skb)) {
		++mcs->netdev->stats.rx_dropped;
		return;
	}

	skb_reserve(skb, 1);
	skb_copy_to_linear_data(skb, buf, new_len);
	skb_put(skb, new_len);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->dev = mcs->netdev;

	netif_rx(skb);

	mcs->netdev->stats.rx_packets++;
	mcs->netdev->stats.rx_bytes += new_len;
}

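/* Unwrap a received FIR frame: recompute the CRC32 over the payload, compare
 * it with the FCS appended by the hardware, and pass good frames up the
 * stack as a new skb. */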
static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
{
	__u32 fcs;
	int new_len;
	struct sk_buff *skb;

	new_len = len - 4;
	if (unlikely(new_len <= 0)) {
		net_err_ratelimited("%s short frame length %d\n",
				    mcs->netdev->name, new_len);
		++mcs->netdev->stats.rx_errors;
		++mcs->netdev->stats.rx_length_errors;
		return;
	}

	fcs = ~(crc32_le(~0, buf, new_len));
	if (fcs != get_unaligned_le32(buf + new_len)) {
		net_err_ratelimited("crc error calc 0x%x len %d\n",
				    fcs, new_len);
		mcs->netdev->stats.rx_errors++;
		mcs->netdev->stats.rx_crc_errors++;
		return;
	}

	skb = dev_alloc_skb(new_len + 1);
	if (unlikely(!skb)) {
		++mcs->netdev->stats.rx_dropped;
		return;
	}

	skb_reserve(skb, 1);
	skb_copy_to_linear_data(skb, buf, new_len);
	skb_put(skb, new_len);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->dev = mcs->netdev;

	netif_rx(skb);

	mcs->netdev->stats.rx_packets++;
	mcs->netdev->stats.rx_bytes += new_len;
}

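/* Allocate the receive and transmit URBs used by the driver.
 * Returns 1 on success, 0 on failure. */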
static inline int mcs_setup_urbs(struct mcs_cb *mcs)
{
	mcs->rx_urb = NULL;

	mcs->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mcs->tx_urb)
		return 0;

	mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mcs->rx_urb) {
		usb_free_urb(mcs->tx_urb);
		mcs->tx_urb = NULL;
		return 0;
	}

	return 1;
}

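/* Reset the receive state machine and submit the bulk-in URB.
 * Returns the usb_submit_urb() result (0 on success). */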
static inline int mcs_receive_start(struct mcs_cb *mcs)
{
	mcs->rx_buff.in_frame = FALSE;
	mcs->rx_buff.state = OUTSIDE_FRAME;

	usb_fill_bulk_urb(mcs->rx_urb, mcs->usbdev,
			  usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in),
			  mcs->in_buf, 4096, mcs_receive_irq, mcs);

	mcs->rx_urb->status = 0;
	return usb_submit_urb(mcs->rx_urb, GFP_KERNEL);
}

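/* Record the first IN and OUT endpoint addresses found on the interface.
 * Returns 1 if both were found, 0 otherwise. */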
static inline int mcs_find_endpoints(struct mcs_cb *mcs,
				     struct usb_host_endpoint *ep, int epnum)
{
	int i;
	int ret = 0;

	if (!ep)
		return ret;

	for (i = 0; i < epnum; i++) {
		if (ep[i].desc.bEndpointAddress & USB_DIR_IN)
			mcs->ep_in = ep[i].desc.bEndpointAddress;
		else
			mcs->ep_out = ep[i].desc.bEndpointAddress;

		if ((mcs->ep_in != 0) && (mcs->ep_out != 0)) {
			ret = 1;
			break;
		}
	}

	return ret;
}

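/* Deferred work: perform a pending speed change and wake the tx queue. */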
static void mcs_speed_work(struct work_struct *work)
{
	struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
	struct net_device *netdev = mcs->netdev;

	mcs_speed_change(mcs);
	netif_wake_queue(netdev);
}

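/* Reprogram the device for mcs->new_speed: wait for MCS_IRINTX to clear,
 * select SIR/MIR/FIR mode and the new speed bits in the mode register, rerun
 * the transceiver setup when the mode changed, and clear the reset bit. */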
static int mcs_speed_change(struct mcs_cb *mcs)
{
	int ret = 0;
	int rst = 0;
	int cnt = 0;
	__u16 nspeed;
	__u16 rval;

	nspeed = mcs_speed_set[(mcs->new_speed >> 8) & 0x0f];

	do {
		mcs_get_reg(mcs, MCS_RESV_REG, &rval);
	} while (cnt++ < 100 && (rval & MCS_IRINTX));

	if (cnt > 100) {
		net_err_ratelimited("unable to change speed\n");
		ret = -EIO;
		goto error;
	}

	mcs_get_reg(mcs, MCS_MODE_REG, &rval);

	if (mcs->new_speed <= 115200) {
		rval &= ~MCS_FIR;

		if ((rst = (mcs->speed > 115200)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);

	} else if (mcs->new_speed <= 1152000) {
		rval &= ~MCS_FIR;

		if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);

	} else {
		rval |= MCS_FIR;

		if ((rst = (mcs->speed != 4000000)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);

	}

	rval &= ~MCS_SPEED_MASK;
	rval |= nspeed;

	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;

	if (rst)
		switch (mcs->transceiver_type) {
		case MCS_TSC_VISHAY:
			ret = mcs_setup_transceiver_vishay(mcs);
			break;

		case MCS_TSC_SHARP:
			ret = mcs_setup_transceiver_sharp(mcs);
			break;

		case MCS_TSC_AGILENT:
			ret = mcs_setup_transceiver_agilent(mcs);
			break;

		default:
			ret = 1;
			net_warn_ratelimited("Unknown transceiver type: %d\n",
					     mcs->transceiver_type);
		}
	if (unlikely(ret))
		goto error;

	mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	rval &= ~MCS_RESET;
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);

	mcs->speed = mcs->new_speed;
error:
	mcs->new_speed = 0;
	return ret;
}

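/* Ioctl handler for the IrDA netdev; no private ioctls are implemented. */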
static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	int ret = 0;

	switch (cmd) {
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

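/* net_device stop handler: stop the queue, release the receive buffer and
 * URBs, and close the IrLAP link. */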
static int mcs_net_close(struct net_device *netdev)
{
	int ret = 0;
	struct mcs_cb *mcs = netdev_priv(netdev);

	netif_stop_queue(netdev);

	kfree_skb(mcs->rx_buff.skb);

	usb_kill_urb(mcs->rx_urb);
	usb_free_urb(mcs->rx_urb);
	usb_kill_urb(mcs->tx_urb);
	usb_free_urb(mcs->tx_urb);

	if (mcs->irlap)
		irlap_close(mcs->irlap);

	mcs->irlap = NULL;
	return ret;
}

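/* net_device open handler: clear any halted endpoints, set up the
 * transceiver, allocate the receive buffer and URBs, open the IrLAP layer
 * and start reception. */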
static int mcs_net_open(struct net_device *netdev)
{
	struct mcs_cb *mcs = netdev_priv(netdev);
	char hwname[16];
	int ret = 0;

	ret = usb_clear_halt(mcs->usbdev,
			     usb_sndbulkpipe(mcs->usbdev, mcs->ep_in));
	if (ret)
		goto error1;
	ret = usb_clear_halt(mcs->usbdev,
			     usb_rcvbulkpipe(mcs->usbdev, mcs->ep_out));
	if (ret)
		goto error1;

	ret = mcs_setup_transceiver(mcs);
	if (ret)
		goto error1;

	ret = -ENOMEM;

	mcs->receiving = 0;
	mcs->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	mcs->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!mcs->rx_buff.skb)
		goto error1;

	skb_reserve(mcs->rx_buff.skb, 1);
	mcs->rx_buff.head = mcs->rx_buff.skb->data;

	sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
	mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
	if (!mcs->irlap) {
		net_err_ratelimited("mcs7780: irlap_open failed\n");
		goto error2;
	}

	if (!mcs_setup_urbs(mcs))
		goto error3;

	ret = mcs_receive_start(mcs);
	if (ret)
		goto error4;

	netif_start_queue(netdev);
	return 0;

error4:
	usb_free_urb(mcs->rx_urb);
	usb_free_urb(mcs->tx_urb);
error3:
	irlap_close(mcs->irlap);
error2:
	kfree_skb(mcs->rx_buff.skb);
error1:
	return ret;
}

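/* Completion handler for the receive URB: unwrap the data according to the
 * current link speed (SIR, MIR or FIR) and resubmit the URB. */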
static void mcs_receive_irq(struct urb *urb)
{
	__u8 *bytes;
	struct mcs_cb *mcs = urb->context;
	int i;
	int ret;

	if (!netif_running(mcs->netdev))
		return;

	if (urb->status)
		return;

	if (urb->actual_length > 0) {
		bytes = urb->transfer_buffer;

		if (mcs->speed < 576000) {
			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
					  &mcs->rx_buff, 0xc0);

			for (i = 0; i < urb->actual_length; i++)
				async_unwrap_char(mcs->netdev,
						  &mcs->netdev->stats,
						  &mcs->rx_buff, bytes[i]);

			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
					  &mcs->rx_buff, 0xc1);
		} else if (mcs->speed == 576000 || mcs->speed == 1152000) {
			mcs_unwrap_mir(mcs, urb->transfer_buffer,
				       urb->actual_length);
		} else {
			mcs_unwrap_fir(mcs, urb->transfer_buffer,
				       urb->actual_length);
		}
	}

	ret = usb_submit_urb(urb, GFP_ATOMIC);
}

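/* Completion handler for the transmit URB: schedule a deferred speed change
 * if one is pending, otherwise wake the tx queue. */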
static void mcs_send_irq(struct urb *urb)
{
	struct mcs_cb *mcs = urb->context;
	struct net_device *ndev = mcs->netdev;

	if (unlikely(mcs->new_speed))
		schedule_work(&mcs->work);
	else
		netif_wake_queue(ndev);
}

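/* Transmit a frame: note any requested speed change, wrap the skb for the
 * current speed (SIR, MIR or FIR) into the bulk-out buffer and submit the
 * tx URB. */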
static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
				 struct net_device *ndev)
{
	unsigned long flags;
	struct mcs_cb *mcs;
	int wraplen;
	int ret = 0;

	netif_stop_queue(ndev);
	mcs = netdev_priv(ndev);

	spin_lock_irqsave(&mcs->lock, flags);

	mcs->new_speed = irda_get_next_speed(skb);
	if (likely(mcs->new_speed == mcs->speed))
		mcs->new_speed = 0;

	if (mcs->speed < 576000)
		wraplen = mcs_wrap_sir_skb(skb, mcs->out_buf);
	else if (mcs->speed == 576000 || mcs->speed == 1152000)
		wraplen = mcs_wrap_mir_skb(skb, mcs->out_buf);
	else
		wraplen = mcs_wrap_fir_skb(skb, mcs->out_buf);

	usb_fill_bulk_urb(mcs->tx_urb, mcs->usbdev,
			  usb_sndbulkpipe(mcs->usbdev, mcs->ep_out),
			  mcs->out_buf, wraplen, mcs_send_irq, mcs);

	if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
		net_err_ratelimited("failed tx_urb: %d\n", ret);
		switch (ret) {
		case -ENODEV:
		case -EPIPE:
			break;
		default:
			mcs->netdev->stats.tx_errors++;
			netif_start_queue(ndev);
		}
	} else {
		mcs->netdev->stats.tx_packets++;
		mcs->netdev->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&mcs->lock, flags);
	return NETDEV_TX_OK;
}

static const struct net_device_ops mcs_netdev_ops = {
	.ndo_open = mcs_net_open,
	.ndo_stop = mcs_net_close,
	.ndo_start_xmit = mcs_hard_xmit,
	.ndo_do_ioctl = mcs_net_ioctl,
};

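/* USB probe: allocate and initialise the IrDA net_device, configure QoS,
 * locate the bulk endpoints and register the network interface. */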
static int mcs_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct net_device *ndev = NULL;
	struct mcs_cb *mcs;
	int ret = -ENOMEM;

	ndev = alloc_irdadev(sizeof(*mcs));
	if (!ndev)
		goto error1;

	pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);

	SET_NETDEV_DEV(ndev, &intf->dev);

	ret = usb_reset_configuration(udev);
	if (ret != 0) {
		net_err_ratelimited("mcs7780: usb reset configuration failed\n");
		goto error2;
	}

	mcs = netdev_priv(ndev);
	mcs->usbdev = udev;
	mcs->netdev = ndev;
	spin_lock_init(&mcs->lock);

	irda_init_max_qos_capabilies(&mcs->qos);

	mcs->qos.baud_rate.bits &=
	    IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200
	    | IR_576000 | IR_1152000 | (IR_4000000 << 8);

	mcs->qos.min_turn_time.bits &= qos_mtt_bits;
	irda_qos_bits_to_value(&mcs->qos);

	INIT_WORK(&mcs->work, mcs_speed_work);

	ndev->netdev_ops = &mcs_netdev_ops;

	if (!intf->cur_altsetting) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
				 intf->cur_altsetting->desc.bNumEndpoints);
	if (!ret) {
		ret = -ENODEV;
		goto error2;
	}

	ret = register_netdev(ndev);
	if (ret != 0)
		goto error2;

	pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n",
		 ndev->name);

	mcs->transceiver_type = transceiver_type;
	mcs->sir_tweak = sir_tweak;
	mcs->receive_mode = receive_mode;

	usb_set_intfdata(intf, mcs);
	return 0;

error2:
	free_netdev(ndev);

error1:
	return ret;
}

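/* USB disconnect: cancel any pending speed-change work, unregister and free
 * the net_device. */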
static void mcs_disconnect(struct usb_interface *intf)
{
	struct mcs_cb *mcs = usb_get_intfdata(intf);

	if (!mcs)
		return;

	cancel_work_sync(&mcs->work);

	unregister_netdev(mcs->netdev);
	free_netdev(mcs->netdev);

	usb_set_intfdata(intf, NULL);
	pr_debug("MCS7780 now disconnected.\n");
}

module_usb_driver(mcs_driver);