/*
 * ks8842.c - Timberdale KS8842 ethernet driver
 *
 * Supports the KS8842 core embedded in the timberdale FPGA as well as
 * genuine Micrel KS884x devices with a 16- or 32-bit bus interface.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define DRV_NAME "ks8842"

/* timberdale specific registers */
#define REG_TIMB_RST 0x1c
#define REG_TIMB_FIFO 0x20
#define REG_TIMB_ISR 0x24
#define REG_TIMB_IER 0x28
#define REG_TIMB_IAR 0x2C
#define REQ_TIMB_DMA_RESUME 0x30

/* KS8842 registers */

#define REG_SELECT_BANK 0x0e

/* registers in bank 0 */
#define REG_QRFCR 0x04

/* registers in bank 2 */
#define REG_MARL 0x00
#define REG_MARM 0x02
#define REG_MARH 0x04

/* registers in bank 3 */
#define REG_GRR 0x06

/* registers in bank 16 */
#define REG_TXCR 0x00
#define REG_TXSR 0x02
#define REG_RXCR 0x04
#define REG_TXMIR 0x08
#define REG_RXMIR 0x0A

/* registers in bank 17 */
#define REG_TXQCR 0x00
#define REG_RXQCR 0x02
#define REG_TXFDPR 0x04
#define REG_RXFDPR 0x06
#define REG_QMU_DATA_LO 0x08
#define REG_QMU_DATA_HI 0x0A

/* registers in bank 18 */
#define REG_IER 0x00
#define IRQ_LINK_CHANGE 0x8000
#define IRQ_TX 0x4000
#define IRQ_RX 0x2000
#define IRQ_RX_OVERRUN 0x0800
#define IRQ_TX_STOPPED 0x0200
#define IRQ_RX_STOPPED 0x0100
#define IRQ_RX_ERROR 0x0080
#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)

/* When running via timberdale in DMA mode, the RX interrupt is not enabled
 * in the FPGA IP, since the IP handles RX DMA internally, and TX completion
 * is signalled through the DMA callbacks instead. The KS8842 itself must
 * still raise the RX interrupt so the IP knows when there is RX data for
 * its DMA FIFOs.
 */
#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
#define REG_ISR 0x02
#define REG_RXSR 0x04
#define RXSR_VALID 0x8000
#define RXSR_BROADCAST 0x80
#define RXSR_MULTICAST 0x40
#define RXSR_UNICAST 0x20
#define RXSR_FRAMETYPE 0x08
#define RXSR_TOO_LONG 0x04
#define RXSR_RUNT 0x02
#define RXSR_CRC_ERROR 0x01
#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)

/* registers in bank 32 */
#define REG_SW_ID_AND_ENABLE 0x00
#define REG_SGCR1 0x02
#define REG_SGCR2 0x04
#define REG_SGCR3 0x06

/* registers in bank 39 */
#define REG_MACAR1 0x00
#define REG_MACAR2 0x02
#define REG_MACAR3 0x04

/* registers in bank 45 */
#define REG_P1MBCR 0x00
#define REG_P1MBSR 0x02

/* registers in bank 46 */
#define REG_P2MBCR 0x00
#define REG_P2MBSR 0x02

/* registers in bank 48 */
#define REG_P1CR2 0x02

/* registers in bank 49 */
#define REG_P1CR4 0x02
#define REG_P1SR 0x04

/* flags passed by the platform device to describe the hardware */
#define MICREL_KS884X 0x01	/* 0 = timberdale (FPGA), 1 = genuine Micrel */
#define KS884X_16BIT 0x02	/* 1 = 16 bit bus, 0 = 32 bit bus */

#define DMA_BUFFER_SIZE 2048

struct ks8842_tx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	void *buf;
	struct scatterlist sg;
	int channel;
};

struct ks8842_rx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	struct sk_buff *skb;
	struct scatterlist sg;
	struct tasklet_struct tasklet;
	int channel;
};

/* DMA is used only when both a TX and an RX channel have been assigned */
#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
	((adapter)->dma_rx.channel != -1))

struct ks8842_adapter {
	void __iomem *hw_addr;
	int irq;
	unsigned long conf_flags;
	struct tasklet_struct tasklet;
	spinlock_t lock; /* spinlock to be interrupt safe */
	struct work_struct timeout_work;
	struct net_device *netdev;
	struct device *dev;
	struct ks8842_tx_dma_ctl dma_tx;
	struct ks8842_rx_dma_ctl dma_rx;
};

static void ks8842_dma_rx_cb(void *data);
static void ks8842_dma_tx_cb(void *data);

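/* Tell the timberdale IP to resume its DMA engines; they are stopped while
 * the driver is accessing the ks8842, since a bank switch in the middle of a
 * transfer would confuse the FPGA (see ks8842_irq/ks8842_tasklet).
 */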
static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
{
	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
}

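/* Register access helpers: the KS8842 registers are organized in banks, so
 * every access first selects the bank through REG_SELECT_BANK and then reads
 * or writes the offset within that bank.
 */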
static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
}

static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
	u8 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite8(value, adapter->hw_addr + offset);
}

static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
	u16 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite16(value, adapter->hw_addr + offset);
}

static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg |= bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg &= ~bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
	u32 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite32(value, adapter->hw_addr + offset);
}

static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread8(adapter->hw_addr + offset);
}

static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread16(adapter->hw_addr + offset);
}

static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread32(adapter->hw_addr + offset);
}

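/* Reset the chip: genuine Micrel parts are reset through the global reset
 * register (GRR, bank 3), while the timberdale variant is reset through the
 * FPGA's own reset register.
 */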
static void ks8842_reset(struct ks8842_adapter *adapter)
{
	if (adapter->conf_flags & MICREL_KS884X) {
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
	} else {
		/* The KS8842 goes haywire when doing a software reset, so
		 * the timberdale IP implements a workaround that performs a
		 * hardware reset instead.
		 */

		/* do the timberdale specific reset */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
		msleep(20);
	}
}

static void ks8842_update_link_status(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	/* check the status of the link */
	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else {
		netif_stop_queue(netdev);
		netif_carrier_off(netdev);
	}
}

static void ks8842_enable_tx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_disable_tx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_enable_rx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_disable_rx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_reset_hw(struct ks8842_adapter *adapter)
{
	/* reset the HW */
	ks8842_reset(adapter);

	/* Enable QMU Transmit flow control, transmit padding and transmit CRC */
	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);

	/* enable the receiver: unicast + multicast + broadcast + flow ctrl
	 * + CRC strip
	 */
	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
		REG_RXCR);

	/* TX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);

	/* RX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);

	/* RX 2 kb high watermark */
	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);

	/* aggressive back off in half duplex */
	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);

	/* enable no excessive collision drop */
	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);

	/* Enable port 1 force flow control / back pressure / transmit / recv */
	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);

	/* restart port auto-negotiation */
	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);

	/* Enable the transmitter */
	ks8842_enable_tx(adapter);

	/* Enable the receiver */
	ks8842_enable_rx(adapter);

	/* clear all interrupts */
	ks8842_write16(adapter, 18, 0xffff, REG_ISR);

	/* enable interrupts */
	if (KS8842_USE_DMA(adapter)) {
		/* In DMA mode the RX interrupt is not enabled in timberdale
		 * because RX data is delivered through DMA callbacks; it must
		 * still be enabled in the KS8842 itself because it tells
		 * timberdale when there is RX data for its DMA FIFOs.
		 */
		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	} else {
		if (!(adapter->conf_flags & MICREL_KS884X))
			iowrite16(ENABLED_IRQS,
				adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	}
	/* enable the switch */
	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}

static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
{
	int i;
	u16 mac;

	for (i = 0; i < ETH_ALEN; i++)
		dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);

	if (adapter->conf_flags & MICREL_KS884X) {
		/* On the genuine Micrel device the MAC address is mirrored
		 * into the switch registers (MACAR1..3) in the reverse word
		 * order compared to the MAC registers (MARL/MARM/MARH).
		 */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	} else {
		/* make sure the switch block uses the same MAC address as
		 * the QMU
		 */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
	}
}

static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
{
	unsigned long flags;
	unsigned i;

	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < ETH_ALEN; i++) {
		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
		if (!(adapter->conf_flags & MICREL_KS884X))
			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
				REG_MACAR1 + i);
	}

	if (adapter->conf_flags & MICREL_KS884X) {
		/* Again, the switch registers take the MAC address in the
		 * reverse word order compared to the MAC registers.
		 */
		u16 mac;

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}

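/* The lower 13 bits of TXMIR report the amount of free space in the TX FIFO. */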
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}

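/* DMA transmit path: the frame is copied into a pre-mapped bounce buffer
 * behind a 4-byte control word (port/IRQ bits plus the frame length), the
 * transfer length is rounded up to a multiple of 4 and the buffer is handed
 * to the dmaengine channel. ks8842_dma_tx_cb() releases the slot and wakes
 * the queue when the transfer completes.
 */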
static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word: enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01;
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;

	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	if (adapter->conf_flags & KS884X_16BIT) {
		u16 *ptr16 = (u16 *)skb->data;
		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
			len -= sizeof(u32);
		}
	} else {
		u32 *ptr = (u32 *)skb->data;
		u32 ctrl;

		/* the control word: enable IRQ, port 1 and the length */
		ctrl = 0x8000 | 0x100 | (len << 16);
		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
			len -= sizeof(u32);
			ptr++;
		}
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
{
	netdev_dbg(netdev, "RX error, status: %x\n", status);

	netdev->stats.rx_errors++;
	if (status & RXSR_TOO_LONG)
		netdev->stats.rx_length_errors++;
	if (status & RXSR_CRC_ERROR)
		netdev->stats.rx_crc_errors++;
	if (status & RXSR_RUNT)
		netdev->stats.rx_frame_errors++;
}

static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
	int len)
{
	netdev_dbg(netdev, "RX packet, len: %d\n", len);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += len;
	if (status & RXSR_MULTICAST)
		netdev->stats.multicast++;
}

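/* Arm a new RX DMA transfer: allocate an skb of DMA_BUFFER_SIZE, map it and
 * submit a slave descriptor. On completion ks8842_dma_rx_cb() schedules the
 * RX tasklet, which hands the frame to the stack and immediately rearms the
 * next transfer.
 */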
static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
		if (unlikely(err)) {
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return err;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	if (ctl->skb)
		dev_kfree_skb(ctl->skb);

	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}

static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;
	dma_addr_t addr = sg_dma_address(&ctl->sg);
	u32 status;

	ctl->adesc = NULL;

	/* kick the next transfer going */
	__ks8842_start_new_rx_dma(netdev);

	/* now handle the data we got */
	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

	status = *((u32 *)skb->data);

	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
		__func__, status & 0xffff);

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		int len = (status >> 16) & 0x7ff;

		ks8842_update_rx_counters(netdev, status, len);

		/* throw away the 4-byte status word */
		skb_reserve(skb, 4);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		ks8842_update_rx_err_counters(netdev, status);
		dev_kfree_skb(skb);
	}
}

static void ks8842_rx_frame(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u32 status;
	int len;

	if (adapter->conf_flags & KS884X_16BIT) {
		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			__func__, status);
	} else {
		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
		len = (status >> 16) & 0x7ff;
		status &= 0xffff;
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			__func__, status);
	}

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb) {

			ks8842_update_rx_counters(netdev, status, len);

			if (adapter->conf_flags & KS884X_16BIT) {
				u16 *data16 = (u16 *)skb_put(skb, len);
				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_LO);
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_HI);
					len -= sizeof(u32);
				}
			} else {
				u32 *data = (u32 *)skb_put(skb, len);

				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data++ = ioread32(adapter->hw_addr +
						REG_QMU_DATA_LO);
					len -= sizeof(u32);
				}
			}
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else
			netdev->stats.rx_dropped++;
	} else
		ks8842_update_rx_err_counters(netdev, status);

	/* adjust the RX high watermark while releasing the frame */
	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);

	/* release the frame back to the QMU */
	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);

	/* restore the RX high watermark */
	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}

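/* PIO receive: keep pulling frames out of the QMU while REG_RXMIR reports
 * pending RX data (lower 13 bits non-zero).
 */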
static void ks8842_handle_rx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
	while (rx_data) {
		ks8842_rx_frame(netdev, adapter);
		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	}
}

static void ks8842_handle_tx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
	netdev->stats.tx_packets++;
	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_handle_rx_overrun(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	netdev_dbg(netdev, "%s: entry\n", __func__);
	netdev->stats.rx_errors++;
	netdev->stats.rx_fifo_errors++;
}

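/* Bottom half of the interrupt handler: the interrupt sources were masked by
 * ks8842_irq(); ack and dispatch the individual causes here, then re-enable
 * the interrupts and restore the bank selection found on entry.
 */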
static void ks8842_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	unsigned long flags;
	u16 entry_bank;

	/* read the current bank to be able to set it back */
	spin_lock_irqsave(&adapter->lock, flags);
	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	spin_unlock_irqrestore(&adapter->lock, flags);

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	/* when running in DMA mode, do not ack RX interrupts; they are
	 * handled internally by timberdale, otherwise its DMA FIFOs would
	 * stop
	 */
	if (KS8842_USE_DMA(adapter))
		isr &= ~IRQ_RX;

	/* Ack */
	ks8842_write16(adapter, 18, isr, REG_ISR);

	if (!(adapter->conf_flags & MICREL_KS884X))
		/* Ack in the timberdale IP as well */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);

	if (!netif_running(netdev))
		return;

	if (isr & IRQ_LINK_CHANGE)
		ks8842_update_link_status(netdev, adapter);

	/* should not get IRQ_RX when running in DMA mode */
	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
		ks8842_handle_rx(netdev, adapter);

	/* should only happen when in PIO mode */
	if (isr & IRQ_TX)
		ks8842_handle_tx(netdev, adapter);

	if (isr & IRQ_RX_OVERRUN)
		ks8842_handle_rx_overrun(netdev, adapter);

	if (isr & IRQ_TX_STOPPED) {
		ks8842_disable_tx(adapter);
		ks8842_enable_tx(adapter);
	}

	if (isr & IRQ_RX_STOPPED) {
		ks8842_disable_rx(adapter);
		ks8842_enable_rx(adapter);
	}

	/* re-enable interrupts, put back the bank selection register */
	spin_lock_irqsave(&adapter->lock, flags);
	if (KS8842_USE_DMA(adapter))
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	else
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* Make sure timberdale continues DMA operations; they are stopped
	 * while we are handling the ks8842 because we might change bank
	 */
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);

	spin_unlock_irqrestore(&adapter->lock, flags);
}

static irqreturn_t ks8842_irq(int irq, void *devid)
{
	struct net_device *netdev = devid;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	irqreturn_t ret = IRQ_NONE;

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	if (isr) {
		if (KS8842_USE_DMA(adapter))
			/* disable all but the RX IRQ, since the FPGA relies on it */
			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
		else
			/* disable IRQs */
			ks8842_write16(adapter, 18, 0x00, REG_IER);

		/* schedule the tasklet */
		tasklet_schedule(&adapter->tasklet);

		ret = IRQ_HANDLED;
	}

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* After an interrupt, tell timberdale to continue DMA operations.
	 * DMA is disabled while we are handling the ks8842 because we might
	 * change bank.
	 */
	ks8842_resume_dma(adapter);

	return ret;
}

static void ks8842_dma_rx_cb(void *data)
{
	struct net_device *netdev = data;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "RX DMA finished\n");
	/* schedule the tasklet which delivers the frame to the stack */
	if (adapter->dma_rx.adesc)
		tasklet_schedule(&adapter->dma_rx.tasklet);
}

static void ks8842_dma_tx_cb(void *data)
{
	struct net_device *netdev = data;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;

	netdev_dbg(netdev, "TX DMA finished\n");

	if (!ctl->adesc)
		return;

	netdev->stats.tx_packets++;
	ctl->adesc = NULL;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_stop_dma(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	tx_ctl->adesc = NULL;
	if (tx_ctl->chan)
		tx_ctl->chan->device->device_control(tx_ctl->chan,
			DMA_TERMINATE_ALL, 0);

	rx_ctl->adesc = NULL;
	if (rx_ctl->chan)
		rx_ctl->chan->device->device_control(rx_ctl->chan,
			DMA_TERMINATE_ALL, 0);

	if (sg_dma_address(&rx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&rx_ctl->sg) = 0;

	dev_kfree_skb(rx_ctl->skb);
	rx_ctl->skb = NULL;
}

static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	ks8842_stop_dma(adapter);

	if (tx_ctl->chan)
		dma_release_channel(tx_ctl->chan);
	tx_ctl->chan = NULL;

	if (rx_ctl->chan)
		dma_release_channel(rx_ctl->chan);
	rx_ctl->chan = NULL;

	tasklet_kill(&rx_ctl->tasklet);

	if (sg_dma_address(&tx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	sg_dma_address(&tx_ctl->sg) = 0;

	kfree(tx_ctl->buf);
	tx_ctl->buf = NULL;
}

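/* dmaengine filter: pick the channel whose id matches the channel number
 * handed over through the platform data.
 */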
static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	return chan->chan_id == (long)filter_param;
}

static int ks8842_alloc_dma_bufs(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
	int err;

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	sg_init_table(&tx_ctl->sg, 1);

	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
		(void *)(long)tx_ctl->channel);
	if (!tx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	/* allocate the TX DMA bounce buffer */
	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!tx_ctl->buf) {
		err = -ENOMEM;
		goto err;
	}

	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	err = dma_mapping_error(adapter->dev,
		sg_dma_address(&tx_ctl->sg));
	if (err) {
		sg_dma_address(&tx_ctl->sg) = 0;
		goto err;
	}

	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
		(void *)(long)rx_ctl->channel);
	if (!rx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
		(unsigned long)netdev);

	return 0;
err:
	ks8842_dealloc_dma_bufs(adapter);
	return err;
}

/* Netdevice operations */

static int ks8842_open(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s - entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		err = ks8842_alloc_dma_bufs(netdev);

		if (!err) {
			/* start RX dma */
			err = __ks8842_start_new_rx_dma(netdev);
			if (err)
				ks8842_dealloc_dma_bufs(adapter);
		}

		if (err) {
			printk(KERN_WARNING DRV_NAME
				": Failed to initiate DMA, running PIO\n");
			/* fall back to PIO mode */
			ks8842_dealloc_dma_bufs(adapter);
			adapter->dma_rx.channel = -1;
			adapter->dma_tx.channel = -1;
		}
	}

	/* reset the HW */
	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
		netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
		return err;
	}

	return 0;
}

static int ks8842_close(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s - entry\n", __func__);

	cancel_work_sync(&adapter->timeout_work);

	if (KS8842_USE_DMA(adapter))
		ks8842_dealloc_dma_bufs(adapter);

	/* free the irq */
	free_irq(adapter->irq, netdev);

	/* disable the switch */
	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);

	return 0;
}

static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	int ret;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		unsigned long flags;
		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
		spin_lock_irqsave(&adapter->lock, flags);
		if (adapter->dma_tx.adesc)
			netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		return ret;
	}

	ret = ks8842_tx_frame(skb, netdev);

	if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
		netif_stop_queue(netdev);

	return ret;
}

static int ks8842_set_mac(struct net_device *netdev, void *p)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	char *mac = (u8 *)addr->sa_data;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	ks8842_write_mac_addr(adapter, mac);
	return 0;
}

static void ks8842_tx_timeout_work(struct work_struct *work)
{
	struct ks8842_adapter *adapter =
		container_of(work, struct ks8842_adapter, timeout_work);
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	spin_lock_irqsave(&adapter->lock, flags);

	if (KS8842_USE_DMA(adapter))
		ks8842_stop_dma(adapter);

	/* disable interrupts */
	ks8842_write16(adapter, 18, 0, REG_IER);
	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);

	netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->lock, flags);

	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	if (KS8842_USE_DMA(adapter))
		__ks8842_start_new_rx_dma(netdev);
}

static void ks8842_tx_timeout(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	schedule_work(&adapter->timeout_work);
}

static const struct net_device_ops ks8842_netdev_ops = {
	.ndo_open		= ks8842_open,
	.ndo_stop		= ks8842_close,
	.ndo_start_xmit		= ks8842_xmit_frame,
	.ndo_set_mac_address	= ks8842_set_mac,
	.ndo_tx_timeout		= ks8842_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr
};

static const struct ethtool_ops ks8842_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

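/* Probe: conf_flags is taken from the flags of the memory resource, which is
 * how the platform code indicates MICREL_KS884X and/or KS884X_16BIT; the DMA
 * channel numbers (timberdale only) come from the platform data.
 */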
static int ks8842_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *iomem;
	struct net_device *netdev;
	struct ks8842_adapter *adapter;
	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u16 id;
	unsigned i;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
		goto err_mem_region;

	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
	adapter->conf_flags = iomem->flags;

	if (!adapter->hw_addr)
		goto err_ioremap;

	adapter->irq = platform_get_irq(pdev, 0);
	if (adapter->irq < 0) {
		err = adapter->irq;
		goto err_get_irq;
	}

	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;

	/* DMA is only supported when accessed via timberdale */
	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
	    (pdata->tx_dma_channel != -1) &&
	    (pdata->rx_dma_channel != -1)) {
		adapter->dma_rx.channel = pdata->rx_dma_channel;
		adapter->dma_tx.channel = pdata->tx_dma_channel;
	} else {
		adapter->dma_rx.channel = -1;
		adapter->dma_tx.channel = -1;
	}

	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
	spin_lock_init(&adapter->lock);

	netdev->netdev_ops = &ks8842_netdev_ops;
	netdev->ethtool_ops = &ks8842_ethtool_ops;

	/* check if a MAC address was given in the platform data */
	i = netdev->addr_len;
	if (pdata) {
		for (i = 0; i < netdev->addr_len; i++)
			if (pdata->macaddr[i] != 0)
				break;

		if (i < netdev->addr_len)
			/* an address was given, use it */
			memcpy(netdev->dev_addr, pdata->macaddr,
				netdev->addr_len);
	}

	if (i == netdev->addr_len) {
		/* no address was given, read it from the chip */
		ks8842_read_mac_addr(adapter, netdev->dev_addr);

		if (!is_valid_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	}

	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);

	return 0;

err_register:
err_get_irq:
	iounmap(adapter->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(iomem->start, resource_size(iomem));
err_mem_region:
	return err;
}

static int ks8842_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	unregister_netdev(netdev);
	tasklet_kill(&adapter->tasklet);
	iounmap(adapter->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	return 0;
}

static struct platform_driver ks8842_platform_driver = {
	.driver = {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= ks8842_probe,
	.remove	= ks8842_remove,
};

module_platform_driver(ks8842_platform_driver);

MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ks8842");