/*********************************************************************
 *
 *	sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"


static struct workqueue_struct *irda_sir_wq;	/* the worker for all sir devices */

/***************************** FSM *****************************/
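/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */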
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch (fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */

			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __func__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}
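/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler runs on the irda_sir_wq workqueue and
 * hence may sleep. Since that workqueue is shared by all sir devices,
 * longer delays are handled by requeueing the work with a timeout
 * instead of sleeping.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */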
static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			   __func__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch (fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle driver callback */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines
			 * which were just set during open. Before resetting,
			 * let's wait for the power to stabilize.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;	/* after reset the dongle is at 9600 default speed */

			break;
		case SIRDEV_STATE_DONGLE_CLOSE:
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
					(fsm->param & 0x02) ? TRUE : FALSE,
					(fsm->param & 0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay = ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			} else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			} else
				ret = 0;
			if ((delay = ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			} else
				ret = 0;
			if ((delay = ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __func__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);

#if 0	/* don't stop the queue without a watchdog to recover it */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while (!delay);

	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}
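/* schedule some device configuration task for asynchronous execution
 * by the above state machine.
 * Can be called from process or interrupt/tasklet context.
 */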
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
		   initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_put_instance() - should never happen */
		IRDA_ERROR("%s(), instance is stale!\n", __func__);
		up(&fsm->sem);
		return -ESTALE;
	}

	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
	return 0;
}
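/* (re-)enable the receive path: rewind the rx buffer, reset the
 * unwrap state and allow sirdev_receive() to unwrap incoming bytes
 */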
void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - leftover raw-mode bytes are discarded here */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return dev->rx_buff.state != OUTSIDE_FRAME;
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* wait until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);

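/* send raw (unwrapped) bytes, e.g. for dongle programming; sleeps
 * until a previous transmit, if any, has drained
 */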
int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);
	while (dev->tx_buff.len > 0) {		/* wait until tx gets idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);
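/* read back raw bytes received while rx was disabled, e.g. a dongle's
 * command response; fails with -EIO when normal rx is enabled
 */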
int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining data gets flushed when re-enabling normal rx */

	return count;
}
EXPORT_SYMBOL(sirdev_raw_read);

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;
	if (dev->drv->set_dtr_rts)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);
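/* called from the client driver - likely with bh-context - when it made
 * some progress with transmission: send the next chunk if any, or
 * complete the skb and handle a pending speed change
 */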
void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __func__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual > 0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual < 0)) {
			/* could be dropped later when we have tx_timeout to recover */
			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
				   __func__, actual);
			if ((skb = dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->netdev->stats.tx_errors++;
				dev->netdev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely - there is no skb to complete and nothing to
		 * hand back to the network stack
		 */
		IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
		dev->raw_tx = 0;
		goto done;
	}

	/* everything is sent: account and release the frame, then either
	 * kick a pending speed change or re-enable rx and restart the
	 * transmit queue
	 */
	IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);

	if ((skb = dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			IRDA_ERROR("%s - schedule speed change failed: %d\n",
				   __func__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);
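/* called from the client driver - likely with bh-context - to feed
 * received bytes into the irda stack; cp == NULL signals an rx error
 * at the lower level
 */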
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		IRDA_WARNING("%s(), not ready yet!\n", __func__);
		return -1;
	}

	if (!dev->irlap) {
		IRDA_WARNING("%s - too early: %p / %zd!\n",
			     __func__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->netdev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->netdev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not possible here - keep the raw bytes so the
			 * dongle driver can fetch them via sirdev_raw_read();
			 * they get flushed when rx is re-enabled
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* wrap around on rx buffer overflow */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sirdev_receive);
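/* ndo_start_xmit handler: wrap the LAP frame into tx_buff and start
 * transmission; a zero-length skb carries a speed change request
 */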
static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely
				 * the fsm is still busy. We refuse to accept
				 * the skb and return with the queue stopped
				 * so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return NETDEV_TX_BUSY;
			} else if (unlikely(err)) {
				/* other fatal error - forget the speed change
				 * and hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if (spin_is_locked(&dev->tx_lock)) {
		IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision.
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->netdev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	} else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
			   __func__, actual);
		dev_kfree_skb_any(skb);
		dev->netdev->stats.tx_errors++;
		dev->netdev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return NETDEV_TX_OK;
}
static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = netdev_priv(ndev);
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* the speed change is performed asynchronously by the fsm;
		 * we don't wait for its result here
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* the dongle is attached asynchronously by the fsm */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS: /* Set DTR/RTS modem control lines */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* the modem control lines are set asynchronously by the fsm */
		break;

	case SIOCSMODE:
#if 0	/* disabled - falls through to default */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		break;
#endif
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped 2048 byte frame */

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
					      GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}

static void sirdev_free_buffers(struct sir_dev *dev)
{
	kfree_skb(dev->rx_buff.skb);
	kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	IRDA_DEBUG(2, "%s()\n", __func__);

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);

	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}
static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv;

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}

static const struct net_device_ops sirdev_ops = {
	.ndo_start_xmit	= sirdev_hard_xmit,
	.ndo_open	= sirdev_open,
	.ndo_stop	= sirdev_close,
	.ndo_do_ioctl	= sirdev_ioctl,
};

struct sir_dev *sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	IRDA_DEBUG(0, "%s - %s\n", __func__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n",
			   __func__);
		goto out;
	}
	dev = netdev_priv(ndev);

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	sema_init(&dev->fsm.sem, 1);

	dev->drv = drv;
	dev->netdev = ndev;

	/* Override the network functions we need to use */
	ndev->netdev_ops = &sirdev_ops;

	if (register_netdev(ndev)) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);

int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	IRDA_DEBUG(0, "%s\n", __func__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		IRDA_ERROR("%s - error %d\n", __func__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);

	free_netdev(dev->netdev);

	return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);

static int __init sir_wq_init(void)
{
	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
	if (!irda_sir_wq)
		return -ENOMEM;
	return 0;
}

static void __exit sir_wq_exit(void)
{
	destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");