/*
 * SuperH IrDA (SIR) driver
 *
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License.
 */

#include <linux/delay.h>	/* udelay() */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/clock.h>

#define DRIVER_NAME "sh_sir"

/* transfer phases (see sh_sir_set_phase()) */
#define RX_PHASE	(1 << 0)
#define TX_PHASE	(1 << 1)
#define TX_COMP_PHASE	(1 << 2)	/* tx complete */
#define NONE_PHASE	(1 << 31)

/* register offsets */
#define IRIF_RINTCLR	0x0016	/* DMA rx interrupt source clear */
#define IRIF_TINTCLR	0x0018	/* DMA tx interrupt source clear */
#define IRIF_SIR0	0x0020	/* IrDA-SIR10 control */
#define IRIF_SIR1	0x0022	/* IrDA-SIR10 baudrate error correction */
#define IRIF_SIR2	0x0024	/* IrDA-SIR10 baudrate count */
#define IRIF_SIR3	0x0026	/* IrDA-SIR10 status */
#define IRIF_SIR_FRM	0x0028	/* hardware frame processing set */
#define IRIF_SIR_EOF	0x002A	/* EOF value */
#define IRIF_SIR_FLG	0x002C	/* flag clear */
#define IRIF_UART_STS2	0x002E	/* UART status 2 */
#define IRIF_UART0	0x0030	/* UART control */
#define IRIF_UART1	0x0032	/* UART status */
#define IRIF_UART2	0x0034	/* UART mode */
#define IRIF_UART3	0x0036	/* UART transmit data */
#define IRIF_UART4	0x0038	/* UART receive data */
#define IRIF_UART5	0x003A	/* UART interrupt mask */
#define IRIF_UART6	0x003C	/* UART baud rate error correction */
#define IRIF_UART7	0x003E	/* UART baud rate count */
#define IRIF_CRC0	0x0040	/* CRC engine control */
#define IRIF_CRC1	0x0042	/* CRC engine input data */
#define IRIF_CRC2	0x0044	/* CRC engine calculation */
#define IRIF_CRC3	0x0046	/* CRC engine output data 1 */
#define IRIF_CRC4	0x0048	/* CRC engine output data 2 */

/* IRIF_SIR0 */
#define IRTPW		(1 << 1)	/* transmit pulse width select */
#define IRERRC		(1 << 0)	/* clear receive pulse width error */

/* IRIF_SIR3 */
#define IRERR		(1 << 0)	/* received pulse width error */

/* IRIF_SIR_FRM */
#define EOFD		(1 << 9)	/* EOF detection flag */
#define FRER		(1 << 8)	/* frame error bit */
#define FRP		(1 << 0)	/* frame processing set */

/* IRIF_UART_STS2 */
#define IRSME		(1 << 6)	/* receive sum error flag */
#define IROVE		(1 << 5)	/* receive overrun error flag */
#define IRFRE		(1 << 4)	/* receive framing error flag */
#define IRPRE		(1 << 3)	/* receive parity error flag */

/* IRIF_UART0 */
#define TBEC		(1 << 2)	/* transmit data clear */
#define RIE		(1 << 1)	/* receive enable */
#define TIE		(1 << 0)	/* transmit enable */

/* IRIF_UART1 */
#define URSME		(1 << 6)	/* receive sum error flag */
#define UROVE		(1 << 5)	/* receive overrun error flag */
#define URFRE		(1 << 4)	/* receive framing error flag */
#define URPRE		(1 << 3)	/* receive parity error flag */
#define RBF		(1 << 2)	/* receive buffer full flag */
#define TSBE		(1 << 1)	/* transmit shift buffer empty flag */
#define TBE		(1 << 0)	/* transmit buffer empty flag */
#define TBCOMP		(TSBE | TBE)

/* IRIF_UART5 */
#define RSEIM		(1 << 6)	/* receive sum error interrupt mask */
#define RBFIM		(1 << 2)	/* receive buffer full interrupt mask */
#define TSBEIM		(1 << 1)	/* transmit shift buffer empty interrupt mask */
#define TBEIM		(1 << 0)	/* transmit buffer empty interrupt mask */
#define RX_MASK		(RSEIM | RBFIM)

/* IRIF_CRC0 */
#define CRC_RST		(1 << 15)	/* CRC engine reset */
#define CRC_CT_MASK	0x0FFF		/* CRC input data count */

/************************************************************************


			structure


************************************************************************/
struct sh_sir_self {
	void __iomem		*membase;
	unsigned int		 irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		 qos;

	iobuff_t		 tx_buff;
	iobuff_t		 rx_buff;
};

/************************************************************************


			common function


************************************************************************/
static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
{
	iowrite16(data, self->membase + offset);
}

static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
{
	return ioread16(self->membase + offset);
}

static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
			       u16 mask, u16 data)
{
	u16 old, new;

	old = sh_sir_read(self, offset);
	new = (old & ~mask) | data;
	if (old != new)
		sh_sir_write(self, offset, new);
}

/************************************************************************


			CRC function


************************************************************************/
static void sh_sir_crc_reset(struct sh_sir_self *self)
{
	sh_sir_write(self, IRIF_CRC0, CRC_RST);
}

static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
{
	sh_sir_write(self, IRIF_CRC1, (u16)data);
}

static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
{
	return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
}

static u16 sh_sir_crc_out(struct sh_sir_self *self)
{
	return sh_sir_read(self, IRIF_CRC4);
}

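/*
 * Known-answer self-test for the hardware CRC engine: feed the four
 * bytes 0xCC 0xF5 0xF1 0xA7, then check that the input counter reads 4
 * and the CRC output reads 0x51DF.  Any mismatch means the engine is
 * not responding correctly, and the open() path is aborted with -EIO.
 */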
static int sh_sir_crc_init(struct sh_sir_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_sir_crc_reset(self);

	sh_sir_crc_add(self, 0xCC);
	sh_sir_crc_add(self, 0xF5);
	sh_sir_crc_add(self, 0xF1);
	sh_sir_crc_add(self, 0xA7);

	val = sh_sir_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_sir_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_sir_crc_reset(self);
	return ret;
}

/************************************************************************


			baud rate functions


************************************************************************/
#define SCLK_BASE 1843200 /* 1.8432MHz */

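/*
 * Pick, from the irda clock's cpufreq frequency table, the rate that
 * divides most evenly by SCLK_BASE (smallest remainder), skipping any
 * rate above the peripheral clock.  The smaller the remainder, the less
 * error the correction registers in sh_sir_set_baudrate() must absorb.
 */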
static u32 sh_sir_find_sclk(struct clk *irda_clk)
{
	struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
	struct cpufreq_frequency_table *pos;
	struct clk *pclk = clk_get(NULL, "peripheral_clk");
	u32 limit, min = 0xffffffff, tmp;
	int index = 0;

	limit = clk_get_rate(pclk);
	clk_put(pclk);

	/* the IrDA rate can not be set over peripheral_clk */
	cpufreq_for_each_valid_entry(pos, freq_table) {
		u32 freq = pos->frequency;

		/* skip rates above the peripheral clock */
		if (freq > limit)
			continue;

		tmp = freq % SCLK_BASE;
		if (tmp < min) {
			min = tmp;
			index = pos - freq_table;
		}
	}

	return freq_table[index].frequency;
}

#define ERR_ROUNDING(a) (((a) + 5000) / 10000)
static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
{
	struct clk *clk;
	struct device *dev = &self->ndev->dev;
	u32 rate;
	u16 uabca, uabc;
	u16 irbca, irbc;
	u32 min, rerr, tmp;
	int i;

	/* baud rate error correction, x 10000 */
	u32 rate_err_array[] = {
		   0,  625, 1250, 1875,
		2500, 3125, 3750, 4375,
		5000, 5625, 6250, 6875,
		7500, 8125, 8750, 9375,
	};
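
	/*
	 * The table above lists the 16 selectable fractional corrections,
	 * i/16 expressed in parts per 10000 (625 = 1/16 = 0.0625).  The
	 * loops below pick the entry closest to the measured rate error
	 * and program its index into the correction register.
	 */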

	/*
	 * FIXME: only 9600 baud is supported for now
	 */
	switch (baudrate) {
	case 9600:
		break;
	default:
		dev_err(dev, "unsupported baudrate %d\n", baudrate);
		return -EIO;
	}

	clk = clk_get(NULL, "irda_clk");
	if (IS_ERR(clk)) {
		dev_err(dev, "cannot get irda_clk\n");
		return -EIO;
	}

	clk_set_rate(clk, sh_sir_find_sclk(clk));
	rate = clk_get_rate(clk);
	clk_put(clk);

	dev_dbg(dev, "selected sclk = %d\n", rate);

	/*
	 * CALCULATION
	 *
	 * 1843200 = system rate / (irbca + (irbc + 1))
	 */

	irbc = rate / SCLK_BASE;

	tmp = rate - (SCLK_BASE * irbc);
	tmp *= 10000;

	rerr = tmp / SCLK_BASE;

	min = 0xffffffff;
	irbca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			irbca = i;
		}
	}

	tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
	if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
		dev_warn(dev, "IrDA freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
		SCLK_BASE, tmp, irbc, rate_err_array[irbca]);

	irbca = (irbca & 0xF) << 4;
	irbc  = (irbc - 1) & 0xF;

	if (!irbc) {
		dev_err(dev, "sh_sir cannot set 0 in IRIF_SIR2\n");
		return -EIO;
	}

	sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
	sh_sir_write(self, IRIF_SIR1, irbca);
	sh_sir_write(self, IRIF_SIR2, irbc);

	/*
	 * CALCULATION
	 *
	 * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
	 */

	uabc = rate / baudrate;
	uabc = (uabc / 16) - 1;
	uabc = (uabc + 1) * 16;

	tmp = rate - (uabc * baudrate);
	tmp *= 10000;

	rerr = tmp / baudrate;

	min = 0xffffffff;
	uabca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			uabca = i;
		}
	}

	tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
	if ((baudrate / 100) < abs(tmp - baudrate))
		dev_warn(dev, "UART freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
		baudrate, tmp,
		uabc, rate_err_array[uabca]);

	uabca = (uabca & 0xF) << 4;
	uabc  = (uabc / 16) - 1;

	sh_sir_write(self, IRIF_UART6, uabca);
	sh_sir_write(self, IRIF_UART7, uabc);

	return 0;
}
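
/*
 * Worked example for sh_sir_set_baudrate() (illustrative clock rate
 * only): with sclk = 29491200Hz, irbc = 29491200 / 1843200 = 16 with no
 * remainder, so correction index 0 is chosen and IRIF_SIR2 is written
 * with (16 - 1) & 0xF = 15.  Likewise uabc = 29491200 / 9600 = 3072 =
 * 192 x 16, so IRIF_UART7 gets 3072 / 16 - 1 = 191 and IRIF_UART6 gets
 * correction index 0.
 */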

/************************************************************************


			iobuf function


************************************************************************/
static int __sh_sir_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (!io->head)
		return -ENOMEM;

	io->truesize = size;
	io->in_frame = FALSE;
	io->state = OUTSIDE_FRAME;
	io->data = io->head;

	return 0;
}

static void sh_sir_remove_iobuf(struct sh_sir_self *self)
{
	kfree(self->rx_buff.head);
	kfree(self->tx_buff.head);

	self->rx_buff.head = NULL;
	self->tx_buff.head = NULL;
}

static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
{
	int err = -ENOMEM;

	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuf already exists\n");
		return err;
	}

	err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
	if (err)
		goto iobuf_err;

	err = __sh_sir_init_iobuf(&self->tx_buff, txsize);

iobuf_err:
	if (err)
		sh_sir_remove_iobuf(self);

	return err;
}

/************************************************************************


			status function


************************************************************************/
static void sh_sir_clear_all_err(struct sh_sir_self *self)
{
	/* clear receive pulse width error */
	sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);

	/* clear frame / EOF error flags */
	sh_sir_write(self, IRIF_SIR_FLG, 0xffff);

	/* clear all UART status errors */
	sh_sir_write(self, IRIF_UART_STS2, 0);
}
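
/*
 * sh_sir_set_phase() drives a simple three-state transfer machine:
 * TX_PHASE unmasks the transmit-buffer-empty interrupt and enables the
 * transmitter, TX_COMP_PHASE waits for the shift buffer to drain, and
 * RX_PHASE unmasks the receive interrupts.  Any other phase masks
 * everything.  sh_sir_is_which_phase() reads the mask register back so
 * the irq handler can tell which phase raised the interrupt.
 */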
static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
{
	u16 uart5 = 0;
	u16 uart0 = 0;

	switch (phase) {
	case TX_PHASE:
		uart5 = TBEIM;
		uart0 = TBEC | TIE;
		break;
	case TX_COMP_PHASE:
		uart5 = TSBEIM;
		uart0 = TIE;
		break;
	case RX_PHASE:
		uart5 = RX_MASK;
		uart0 = RIE;
		break;
	default:
		break;
	}

	sh_sir_write(self, IRIF_UART5, uart5);
	sh_sir_write(self, IRIF_UART0, uart0);
}

static int sh_sir_is_which_phase(struct sh_sir_self *self)
{
	u16 val = sh_sir_read(self, IRIF_UART5);

	if (val & TBEIM)
		return TX_PHASE;

	if (val & TSBEIM)
		return TX_COMP_PHASE;

	if (val & RX_MASK)
		return RX_PHASE;

	return NONE_PHASE;
}

static void sh_sir_tx(struct sh_sir_self *self, int phase)
{
	switch (phase) {
	case TX_PHASE:
		if (0 >= self->tx_buff.len) {
			sh_sir_set_phase(self, TX_COMP_PHASE);
		} else {
			sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
			self->tx_buff.len--;
			self->tx_buff.data++;
		}
		break;
	case TX_COMP_PHASE:
		sh_sir_set_phase(self, RX_PHASE);
		netif_wake_queue(self->ndev);
		break;
	default:
		dev_err(&self->ndev->dev, "should not happen\n");
		break;
	}
}

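/*
 * Poll UART1 for up to ~1024us waiting for a received byte.  On RBF
 * with no error flags set, the byte is returned.  On any receive error
 * the loop stops, the status is logged, and the data register is read
 * once more purely to clear the error condition; -1 is returned.
 */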
static int sh_sir_read_data(struct sh_sir_self *self)
{
	u16 val = 0;
	int timeout = 1024;

	while (timeout--) {
		val = sh_sir_read(self, IRIF_UART1);

		/* data arrived? */
		if (val & RBF) {
			if (val & (URSME | UROVE | URFRE | URPRE))
				break;

			return (int)sh_sir_read(self, IRIF_UART4);
		}

		udelay(1);
	}

	dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
		val, sh_sir_read(self, IRIF_UART_STS2));

	/* read the data register to clear the error */
	sh_sir_read(self, IRIF_UART4);

	return -1;
}

static void sh_sir_rx(struct sh_sir_self *self)
{
	int timeout = 1024;
	int data;

	while (timeout--) {
		data = sh_sir_read_data(self);
		if (data < 0)
			break;

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, (u8)data);
		self->ndev->last_rx = jiffies;

		/* continue while the EOF detection flag is set */
		if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
			continue;

		break;
	}
}

static irqreturn_t sh_sir_irq(int irq, void *dev_id)
{
	struct sh_sir_self *self = dev_id;
	struct device *dev = &self->ndev->dev;
	int phase = sh_sir_is_which_phase(self);

	switch (phase) {
	case TX_COMP_PHASE:
	case TX_PHASE:
		sh_sir_tx(self, phase);
		break;
	case RX_PHASE:
		if (sh_sir_read(self, IRIF_SIR3))
			dev_err(dev, "rcv pulse width error occurred\n");

		sh_sir_rx(self);
		sh_sir_clear_all_err(self);
		break;
	default:
		dev_err(dev, "unknown interrupt\n");
	}

	return IRQ_HANDLED;
}

/************************************************************************


			net_device_ops function


************************************************************************/
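/*
 * Transmit path: sh_sir_hard_xmit() stops the queue, wraps the skb into
 * SIR framing in tx_buff via the irda core's async_wrap_skb(), and
 * switches to TX_PHASE; sh_sir_irq()/sh_sir_tx() then feed tx_buff to
 * the hardware one byte per transmit-buffer-empty interrupt, and the
 * queue is woken again once the shift buffer has drained.
 */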
static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int speed = irda_get_next_speed(skb);

	if ((0 < speed) &&
	    (9600 != speed)) {
		dev_err(&ndev->dev, "only 9600 baud is supported (%d)\n", speed);
		return -EIO;
	}

	netif_stop_queue(ndev);

	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len = 0;
	if (skb->len)
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

	sh_sir_set_phase(self, TX_PHASE);
	dev_kfree_skb(skb);

	return 0;
}

static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME: the irda framework requires this callback,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}

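/*
 * open: enable the clock, run the CRC engine self-test, program 9600
 * baud, register with IrLAP, then enable hardware frame processing
 * (FRP), clear stale status with dummy reads of UART1/UART4, and enter
 * RX_PHASE before starting the queue.
 */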
static int sh_sir_open(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_sir_crc_init(self);
	if (err)
		goto open_err;

	sh_sir_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	/*
	 * now enable the interrupt, then start the queue
	 */
	sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
	sh_sir_read(self, IRIF_UART1); /* flag clear */
	sh_sir_read(self, IRIF_UART4); /* flag clear */
	sh_sir_set_phase(self, RX_PHASE);

	netif_start_queue(ndev);

	dev_info(&self->ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}

static int sh_sir_stop(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	/* stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}

static const struct net_device_ops sh_sir_ndo = {
	.ndo_open		= sh_sir_open,
	.ndo_stop		= sh_sir_stop,
	.ndo_start_xmit		= sh_sir_hard_xmit,
	.ndo_do_ioctl		= sh_sir_ioctl,
	.ndo_get_stats		= sh_sir_stats,
};

/************************************************************************


			platform_driver function


************************************************************************/
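/*
 * probe: map the MMIO window, allocate the rx/tx iobufs, look up the
 * per-instance clock named "irda<id>", restrict QoS to 9600 baud, then
 * register the netdev and finally request the interrupt.  The clock is
 * only enabled in open(), not here.
 */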
static int sh_sir_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_sir_self *self;
	struct resource *res;
	char clk_name[8];
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
	self->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		err = -ENODEV;
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops = &sh_sir_ndo;
	ndev->irq = irq;

	self->ndev = ndev;
	self->qos.baud_rate.bits &= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);
	err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
		/* the netdev is already registered; undo that before teardown */
		unregister_netdev(ndev);
		goto err_mem_4;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_sir_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}

static int sh_sir_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_sir_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	clk_put(self->clk);
	sh_sir_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver sh_sir_driver = {
	.probe = sh_sir_probe,
	.remove = sh_sir_remove,
	.driver = {
		.name = DRIVER_NAME,
	},
};

module_platform_driver(sh_sir_driver);

MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");