/*
 * Au1000 IrDA device driver.
 *
 * Author: Pete Popov <ppopov@mvista.com>
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/mach-au1x00/au1000.h>

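/* control register offsets, relative to the ioremap()ed register base */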
#define IR_RING_PTR_STATUS	0x00
#define IR_RING_BASE_ADDR_H	0x04
#define IR_RING_BASE_ADDR_L	0x08
#define IR_RING_SIZE		0x0C
#define IR_RING_PROMPT		0x10
#define IR_RING_ADDR_CMPR	0x14
#define IR_INT_CLEAR		0x18
#define IR_CONFIG_1		0x20
#define IR_SIR_FLAGS		0x24
#define IR_STATUS		0x28
#define IR_READ_PHY_CONFIG	0x2C
#define IR_WRITE_PHY_CONFIG	0x30
#define IR_MAX_PKT_LEN		0x34
#define IR_RX_BYTE_CNT		0x38
#define IR_CONFIG_2		0x3C
#define IR_ENABLE		0x40

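/* IR_CONFIG_1 bits */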
#define IR_RX_INVERT_LED	(1 << 0)
#define IR_TX_INVERT_LED	(1 << 1)
#define IR_ST			(1 << 2)
#define IR_SF			(1 << 3)
#define IR_SIR			(1 << 4)
#define IR_MIR			(1 << 5)
#define IR_FIR			(1 << 6)
#define IR_16CRC		(1 << 7)
#define IR_TD			(1 << 8)
#define IR_RX_ALL		(1 << 9)
#define IR_DMA_ENABLE		(1 << 10)
#define IR_RX_ENABLE		(1 << 11)
#define IR_TX_ENABLE		(1 << 12)
#define IR_LOOPBACK		(1 << 14)
#define IR_SIR_MODE		(IR_SIR | IR_DMA_ENABLE | \
				 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
				 IR_16CRC)

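/* IR_STATUS bits */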
#define IR_RX_STATUS		(1 << 9)
#define IR_TX_STATUS		(1 << 10)
#define IR_PHYEN		(1 << 15)

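/* IR_WRITE_PHY_CONFIG fields */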
#define IR_BR(x)		(((x) & 0x3f) << 10)
#define IR_PW(x)		(((x) & 0x1f) << 5)
#define IR_P(x)			((x) & 0x1f)

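/* IR_CONFIG_2 bits */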
#define IR_MODE_INV		(1 << 0)
#define IR_ONE_PIN		(1 << 1)
#define IR_PHYCLK_40MHZ		(0 << 2)
#define IR_PHYCLK_48MHZ		(1 << 2)
#define IR_PHYCLK_56MHZ		(2 << 2)
#define IR_PHYCLK_64MHZ		(3 << 2)
#define IR_DP			(1 << 4)
#define IR_DA			(1 << 5)
#define IR_FLT_HIGH		(0 << 6)
#define IR_FLT_MEDHI		(1 << 6)
#define IR_FLT_MEDLO		(2 << 6)
#define IR_FLT_LO		(3 << 6)
#define IR_IEN			(1 << 8)

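/* IR_ENABLE bits */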
#define IR_HC			(1 << 3)
#define IR_CE			(1 << 2)
#define IR_C			(1 << 1)
#define IR_BE			(1 << 0)

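/* descriptor ring and data buffer sizing */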
#define NUM_IR_DESC		64
#define RING_SIZE_4		0x0
#define RING_SIZE_16		0x3
#define RING_SIZE_64		0xF
#define MAX_NUM_IR_DESC		64
#define MAX_BUF_SIZE		2048

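/* TX descriptor flags */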
#define AU_OWN			(1 << 7)
#define IR_DIS_CRC		(1 << 6)
#define IR_BAD_CRC		(1 << 5)
#define IR_NEED_PULSE		(1 << 4)
#define IR_FORCE_UNDER		(1 << 3)
#define IR_DISABLE_TX		(1 << 2)
#define IR_HW_UNDER		(1 << 0)
#define IR_TX_ERROR		(IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)

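/* RX descriptor flags */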
#define IR_PHY_ERROR		(1 << 6)
#define IR_CRC_ERROR		(1 << 5)
#define IR_MAX_LEN		(1 << 4)
#define IR_FIFO_OVER		(1 << 3)
#define IR_SIR_ERROR		(1 << 2)
#define IR_RX_ERROR		(IR_PHY_ERROR | IR_CRC_ERROR | \
				 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)

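/* CPU-side bookkeeping for one DMA data buffer */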
struct db_dest {
	struct db_dest *pnext;
	volatile u32 *vaddr;
	dma_addr_t dma_addr;
};

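/* hardware DMA ring descriptor, laid out byte-wise as the engine expects */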
struct ring_dest {
	u8 count_0;
	u8 count_1;
	u8 reserved;
	u8 flags;
	u8 addr_0;
	u8 addr_1;
	u8 addr_2;
	u8 addr_3;
};

struct au1k_private {
	void __iomem *iobase;
	int irq_rx, irq_tx;

	struct db_dest *pDBfree;
	struct db_dest db[2 * NUM_IR_DESC];
	volatile struct ring_dest *rx_ring[NUM_IR_DESC];
	volatile struct ring_dest *tx_ring[NUM_IR_DESC];
	struct db_dest *rx_db_inuse[NUM_IR_DESC];
	struct db_dest *tx_db_inuse[NUM_IR_DESC];
	u32 rx_head;
	u32 tx_head;
	u32 tx_tail;
	u32 tx_full;

	iobuff_t rx_buff;

	struct net_device *netdev;
	struct qos_info qos;
	struct irlap_cb *irlap;

	u8 open;
	u32 speed;
	u32 newspeed;

	struct resource *ioarea;
	struct au1k_irda_platform_data *platdata;
	struct clk *irda_clk;
};

static int qos_mtt_bits = 0x07;

static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
	if (p->platdata && p->platdata->set_phy_mode)
		p->platdata->set_phy_mode(mode);
}

static inline unsigned long irda_read(struct au1k_private *p,
				      unsigned long ofs)
{
	/*
	 * Dummy read first: the controller has to be read twice before
	 * it returns valid data.
	 */
	(void)__raw_readl(p->iobase + ofs);
	return __raw_readl(p->iobase + ofs);
}

static inline void irda_write(struct au1k_private *p, unsigned long ofs,
			      unsigned long val)
{
	__raw_writel(val, p->iobase + ofs);
	wmb();
}

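/*
 * Pull one free data-buffer descriptor off the driver's free list;
 * returns NULL when the list is exhausted.
 */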
static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
	struct db_dest *db;

	db = aup->pDBfree;
	if (db)
		aup->pDBfree = db->pnext;
	return db;
}

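/*
 * Simple DMA buffer allocator: grabs zeroed pages, reports the bus
 * address through dma_handle and returns a KSEG0 pointer for CPU access.
 */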
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;
	gfp_t gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = (void *)KSEG0ADDR(ret);
	}
	return ret;
}

static void dma_free(void *vaddr, size_t size)
{
	vaddr = (void *)KSEG0ADDR(vaddr);
	free_pages((unsigned long)vaddr, get_order(size));
}

static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
	int i;

	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->rx_ring[i] = (volatile struct ring_dest *)
			(rx_base + sizeof(struct ring_dest) * i);
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->tx_ring[i] = (volatile struct ring_dest *)
			(tx_base + sizeof(struct ring_dest) * i);
	}
}

static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

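/*
 * Set the IrDA communications speed: the PHY and DMA engine are quiesced,
 * both descriptor rings are reset, and the PHY/config registers are
 * reprogrammed for the requested rate (SIR rates or 4 Mbps FIR).
 */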
static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;
	unsigned long control;
	int ret = 0, timeout = 10, i;

	if (speed == aup->speed)
		return ret;

	/* disable PHY first */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);

	/* disable RX/TX */
	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
	msleep(20);
	while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
		msleep(20);
		if (!timeout--) {
			netdev_err(dev, "rx/tx disable timeout\n");
			break;
		}
	}

	/* disable DMA */
	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
	msleep(20);

	/* reset both descriptor rings */
	aup->tx_head = aup->tx_tail = aup->rx_head = 0;

	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->tx_ring[i];
		ptxd->flags = 0;
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
	}

	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->rx_ring[i];
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		ptxd->flags = AU_OWN;
	}

	if (speed == 4000000)
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
	else
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

	switch (speed) {
	case 9600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 19200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 38400:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 57600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 115200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 4000000:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
		irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
			   IR_RX_ENABLE);
		break;
	default:
		netdev_err(dev, "unsupported speed %x\n", speed);
		ret = -EINVAL;
		break;
	}

	aup->speed = speed;
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);

	control = irda_read(aup, IR_STATUS);
	irda_write(aup, IR_RING_PROMPT, 0);

	if (control & (1 << 14)) {
		netdev_err(dev, "configuration error\n");
	} else {
		if (control & (1 << 11))
			netdev_dbg(dev, "Valid SIR config\n");
		if (control & (1 << 12))
			netdev_dbg(dev, "Valid MIR config\n");
		if (control & (1 << 13))
			netdev_dbg(dev, "Valid FIR config\n");
		if (control & (1 << 10))
			netdev_dbg(dev, "TX enabled\n");
		if (control & (1 << 9))
			netdev_dbg(dev, "RX enabled\n");
	}

	return ret;
}

static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;

	if (status & IR_RX_ERROR) {
		ps->rx_errors++;
		if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
			ps->rx_missed_errors++;
		if (status & IR_MAX_LEN)
			ps->rx_length_errors++;
		if (status & IR_CRC_ERROR)
			ps->rx_crc_errors++;
	} else
		ps->rx_bytes += count;
}

static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct net_device_stats *ps = &dev->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & IR_TX_ERROR) {
		ps->tx_errors++;
		ps->tx_aborted_errors++;
	}
}

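/*
 * Reclaim transmitted descriptors: walk the TX ring from tx_tail until a
 * descriptor still owned by the DMA engine is reached, updating stats and
 * waking the queue if it had been stopped.
 */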
static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				(ptxd->count_1 << 8) | ptxd->count_0);
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		wmb();
		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		} else {
			irda_write(aup, IR_CONFIG_1,
				   irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
			irda_write(aup, IR_CONFIG_1,
				   irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
			irda_write(aup, IR_RING_PROMPT, 0);
		}
	}
}

static int au1k_irda_rx(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *prxd;
	struct sk_buff *skb;
	struct db_dest *pDB;
	u32 flags, count;

	prxd = aup->rx_ring[aup->rx_head];
	flags = prxd->flags;

	while (!(flags & AU_OWN)) {
		pDB = aup->rx_db_inuse[aup->rx_head];
		count = (prxd->count_1 << 8) | prxd->count_0;
		if (!(flags & IR_RX_ERROR)) {
			/* good frame: pass it up the IrDA stack */
			update_rx_stats(dev, flags, count);
			skb = alloc_skb(count + 1, GFP_ATOMIC);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
				/*
				 * leave the descriptor for the next pass
				 * rather than spinning on the failed
				 * allocation
				 */
				break;
			}
			skb_reserve(skb, 1);
			if (aup->speed == 4000000)
				skb_put(skb, count);
			else
				skb_put(skb, count - 2);
			skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
						count - 2);
			skb->dev = dev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			prxd->count_0 = 0;
			prxd->count_1 = 0;
		}
		prxd->flags |= AU_OWN;
		aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
		irda_write(aup, IR_RING_PROMPT, 0);

		/* next descriptor */
		prxd = aup->rx_ring[aup->rx_head];
		flags = prxd->flags;
	}
	return 0;
}

static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct au1k_private *aup = netdev_priv(dev);

	irda_write(aup, IR_INT_CLEAR, 0);

	au1k_irda_rx(dev);
	au1k_tx_ack(dev);

	return IRQ_HANDLED;
}

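/*
 * Bring up the IrDA block: enable the interface clock, program the
 * descriptor ring base/size registers and apply the initial 9600 baud
 * SIR configuration.
 */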
static int au1k_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	u32 enable, ring_address, phyck;
	struct clk *c;
	int i;

	c = clk_get(NULL, "irda_clk");
	if (IS_ERR(c))
		return PTR_ERR(c);
	i = clk_prepare_enable(c);
	if (i) {
		clk_put(c);
		return i;
	}

	switch (clk_get_rate(c)) {
	case 40000000:
		phyck = IR_PHYCLK_40MHZ;
		break;
	case 48000000:
		phyck = IR_PHYCLK_48MHZ;
		break;
	case 56000000:
		phyck = IR_PHYCLK_56MHZ;
		break;
	case 64000000:
		phyck = IR_PHYCLK_64MHZ;
		break;
	default:
		clk_disable_unprepare(c);
		clk_put(c);
		return -EINVAL;
	}
	aup->irda_clk = c;

	enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	enable |= IR_BE;
#endif
	aup->tx_head = 0;
	aup->tx_tail = 0;
	aup->rx_head = 0;

	for (i = 0; i < NUM_IR_DESC; i++)
		aup->rx_ring[i]->flags = AU_OWN;

	irda_write(aup, IR_ENABLE, enable);
	msleep(20);

	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
	msleep(20);

	irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);

	ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
	irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
	irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);

	irda_write(aup, IR_RING_SIZE,
		   (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));

	irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN);
	irda_write(aup, IR_RING_ADDR_CMPR, 0);

	au1k_irda_set_speed(dev, 9600);
	return 0;
}

static int au1k_irda_start(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	char hwname[32];
	int retval;

	retval = au1k_init(dev);
	if (retval) {
		netdev_err(dev, "error in au1k_init\n");
		return retval;
	}

	retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		netdev_err(dev, "unable to get IRQ %d\n", aup->irq_tx);
		return retval;
	}
	retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		free_irq(aup->irq_tx, dev);
		netdev_err(dev, "unable to get IRQ %d\n", aup->irq_rx);
		return retval;
	}

	/* register with the IrLAP layer under a hardware name */
	sprintf(hwname, "Au1000 SIR/FIR");
	aup->irlap = irlap_open(dev, &aup->qos, hwname);
	netif_start_queue(dev);

	/* interrupt enable */
	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);

	/* power up the transceiver in SIR mode */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

	return 0;
}

static int au1k_irda_stop(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);

	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);

	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
	irda_write(aup, IR_CONFIG_1, 0);
	irda_write(aup, IR_ENABLE, 0);

	if (aup->irlap) {
		irlap_close(aup->irlap);
		aup->irlap = NULL;
	}

	netif_stop_queue(dev);

	free_irq(aup->irq_tx, dev);
	free_irq(aup->irq_rx, dev);

	clk_disable_unprepare(aup->irda_clk);
	clk_put(aup->irda_clk);

	return 0;
}

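/*
 * Transmit routine: stage the frame into the next TX descriptor's data
 * buffer (FIR frames are copied as-is, SIR frames are async-wrapped),
 * hand the descriptor to the DMA engine and prompt the ring.
 */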
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);
	volatile struct ring_dest *ptxd;
	struct db_dest *pDB;
	u32 len, flags;

	if (speed != aup->speed && speed != -1)
		aup->newspeed = speed;

	if ((skb->len == 0) && (aup->newspeed)) {
		if (aup->tx_tail == aup->tx_head) {
			au1k_irda_set_speed(dev, speed);
			aup->newspeed = 0;
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	ptxd = aup->tx_ring[aup->tx_head];
	flags = ptxd->flags;

	if (flags & AU_OWN) {
		netdev_dbg(dev, "tx_full\n");
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
		netdev_dbg(dev, "tx_full\n");
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	}

	pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
	if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
		netdev_dbg(dev, "tx warning: rx byte cnt %lx\n",
			   irda_read(aup, IR_RX_BYTE_CNT));
	}
#endif

	if (aup->speed == 4000000) {
		/* FIR: copy the frame as-is into the DMA buffer */
		skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
		ptxd->count_0 = skb->len & 0xff;
		ptxd->count_1 = (skb->len >> 8) & 0xff;
	} else {
		/* SIR: async-wrap the frame into the DMA buffer */
		len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
		ptxd->count_0 = len & 0xff;
		ptxd->count_1 = (len >> 8) & 0xff;
		ptxd->flags |= IR_DIS_CRC;
	}
	ptxd->flags |= AU_OWN;
	wmb();

	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
	irda_write(aup, IR_RING_PROMPT, 0);

	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
	return NETDEV_TX_OK;
}

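/*
 * The transmitter appears wedged: force the current speed to be
 * reprogrammed (which resets both descriptor rings) and restart the queue.
 */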
static void au1k_tx_timeout(struct net_device *dev)
{
	u32 speed;
	struct au1k_private *aup = netdev_priv(dev);

	netdev_err(dev, "tx timeout\n");
	speed = aup->speed;
	aup->speed = 0;
	au1k_irda_set_speed(dev, speed);
	aup->tx_full = 0;
	netif_wake_queue(dev);
}

static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct au1k_private *aup = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * the speed can only be changed while the
			 * device is up
			 */
			if (aup->open)
				ret = au1k_irda_set_speed(dev,
						rq->ifr_baudrate);
			else {
				netdev_err(dev, "ioctl: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = 0;
		break;
	default:
		break;
	}
	return ret;
}

static const struct net_device_ops au1k_irda_netdev_ops = {
	.ndo_open		= au1k_irda_start,
	.ndo_stop		= au1k_irda_stop,
	.ndo_start_xmit		= au1k_irda_hard_xmit,
	.ndo_tx_timeout		= au1k_tx_timeout,
	.ndo_do_ioctl		= au1k_irda_ioctl,
};

static int au1k_irda_net_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	struct db_dest *pDB, *pDBfree;
	int i, retval;
	dma_addr_t temp;

	retval = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
	if (retval)
		goto out1;

	dev->netdev_ops = &au1k_irda_netdev_ops;

	irda_init_max_qos_capabilies(&aup->qos);

	/* the only value we must override is the baud rate */
	aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
		IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);

	aup->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&aup->qos);

	retval = -ENOMEM;

	/* allocate the rx/tx descriptor rings as one contiguous block;
	 * the tx ring starts right behind the rx ring */
	aup->rx_ring[0] = (struct ring_dest *)
		dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
			  &temp);
	if (!aup->rx_ring[0])
		goto out2;

	/* allocate the data buffers */
	aup->db[0].vaddr =
		dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
	if (!aup->db[0].vaddr)
		goto out3;

	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);

	pDBfree = NULL;
	pDB = aup->db;
	for (i = 0; i < (2 * NUM_IR_DESC); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr =
			(u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	/* attach a data buffer to each rx and tx descriptor */
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->tx_ring[i]->count_0 = 0;
		aup->tx_ring[i]->count_1 = 0;
		aup->tx_ring[i]->flags = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	return 0;

out3:
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
out2:
	kfree(aup->rx_buff.head);
out1:
	netdev_err(dev, "au1k_irda_net_init() failed. Returns %d\n", retval);
	return retval;
}

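/* platform device glue: pick up IRQs, the register window and the IrDA clock */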
static int au1k_irda_probe(struct platform_device *pdev)
{
	struct au1k_private *aup;
	struct net_device *dev;
	struct resource *r;
	struct clk *c;
	int err;

	dev = alloc_irdadev(sizeof(struct au1k_private));
	if (!dev)
		return -ENOMEM;

	aup = netdev_priv(dev);

	aup->platdata = pdev->dev.platform_data;

	err = -EINVAL;
	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r)
		goto out;

	aup->irq_tx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!r)
		goto out;

	aup->irq_rx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		goto out;

	err = -EBUSY;
	aup->ioarea = request_mem_region(r->start, resource_size(r),
					 pdev->name);
	if (!aup->ioarea)
		goto out;

	/* bail out early if the IrDA clock is unavailable */
	c = clk_get(NULL, "irda_clk");
	if (IS_ERR(c)) {
		err = PTR_ERR(c);
		goto out2;
	}
	clk_put(c);

	err = -ENOMEM;
	aup->iobase = ioremap_nocache(r->start, resource_size(r));
	if (!aup->iobase)
		goto out2;

	dev->irq = aup->irq_rx;

	err = au1k_irda_net_init(dev);
	if (err)
		goto out3;
	err = register_netdev(dev);
	if (err)
		goto out4;

	platform_set_drvdata(pdev, dev);

	netdev_info(dev, "IrDA: Registered device\n");
	return 0;

out4:
	dma_free((void *)aup->db[0].vaddr,
		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);
out3:
	iounmap(aup->iobase);
out2:
	release_resource(aup->ioarea);
	kfree(aup->ioarea);
out:
	free_netdev(dev);
	return err;
}

static int au1k_irda_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1k_private *aup = netdev_priv(dev);

	unregister_netdev(dev);

	dma_free((void *)aup->db[0].vaddr,
		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);

	iounmap(aup->iobase);
	release_resource(aup->ioarea);
	kfree(aup->ioarea);

	free_netdev(dev);

	return 0;
}

static struct platform_driver au1k_irda_driver = {
	.driver = {
		.name	= "au1000-irda",
	},
	.probe		= au1k_irda_probe,
	.remove		= au1k_irda_remove,
};

module_platform_driver(au1k_irda_driver);

MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");