1
2
3
4
5
6
7#include <linux/interrupt.h>
8#include <linux/delay.h>
9#include <linux/io.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/pci.h>
13#include <linux/kernel.h>
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/skbuff.h>
18#include <linux/can.h>
19#include <linux/can/dev.h>
20#include <linux/can/error.h>
21
/* CAN control register (CONT) bits */
#define PCH_CTRL_INIT BIT(0)		/* init mode: module stopped */
#define PCH_CTRL_IE BIT(1)		/* module interrupt enable */
#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))	/* IRQ + status + error enables */
#define PCH_CTRL_CCE BIT(6)		/* configuration change enable (BITT/BRPE writable) */
#define PCH_CTRL_OPT BIT(7)		/* test/option register enable */
#define PCH_OPT_SILENT BIT(3)		/* listen-only mode */
#define PCH_OPT_LBACK BIT(4)		/* loopback mode */

/* IFx command mask (CMASK) bits: select which fields a CREQ transfer moves */
#define PCH_CMASK_RX_TX_SET 0x00f3
#define PCH_CMASK_RX_TX_GET 0x0073
#define PCH_CMASK_ALL 0xff
#define PCH_CMASK_NEWDAT BIT(2)
#define PCH_CMASK_CLRINTPND BIT(3)
#define PCH_CMASK_CTRL BIT(4)
#define PCH_CMASK_ARB BIT(5)
#define PCH_CMASK_MASK BIT(6)
#define PCH_CMASK_RDWR BIT(7)		/* 1 = write to message RAM, 0 = read */

/* IFx message control (MCONT) bits */
#define PCH_IF_MCONT_NEWDAT BIT(15)	/* new data received */
#define PCH_IF_MCONT_MSGLOST BIT(14)	/* message overwritten before read */
#define PCH_IF_MCONT_INTPND BIT(13)	/* interrupt pending for this object */
#define PCH_IF_MCONT_UMASK BIT(12)	/* use mask for acceptance filtering */
#define PCH_IF_MCONT_TXIE BIT(11)	/* tx-complete interrupt enable */
#define PCH_IF_MCONT_RXIE BIT(10)	/* rx interrupt enable */
#define PCH_IF_MCONT_RMTEN BIT(9)
#define PCH_IF_MCONT_TXRQXT BIT(8)	/* transmit request */
#define PCH_IF_MCONT_EOB BIT(7)		/* end of FIFO buffer chain */
#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))

/* IFx arbitration/mask bits */
#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
#define PCH_ID2_DIR BIT(13)		/* message direction: 1 = transmit */
#define PCH_ID2_XTD BIT(14)		/* extended (29-bit) identifier */
#define PCH_ID_MSGVAL BIT(15)		/* message object valid */
#define PCH_IF_CREQ_BUSY BIT(15)	/* IFx transfer in progress */

/* status / error counter register fields */
#define PCH_STATUS_INT 0x8000		/* "status interrupt" value in INTR */
#define PCH_RP 0x00008000		/* receive error passive */
#define PCH_REC 0x00007f00		/* receive error counter */
#define PCH_TEC 0x000000ff		/* transmit error counter */

/* status register (STAT) bits */
#define PCH_TX_OK BIT(3)
#define PCH_RX_OK BIT(4)
#define PCH_EPASSIV BIT(5)
#define PCH_EWARN BIT(6)
#define PCH_BUS_OFF BIT(7)


/* bit timing register (BITT/BRPE) field layout */
#define PCH_BIT_BRP_SHIFT 0
#define PCH_BIT_SJW_SHIFT 6
#define PCH_BIT_TSEG1_SHIFT 8
#define PCH_BIT_TSEG2_SHIFT 12
#define PCH_BIT_BRPE_BRPE_SHIFT 6

#define PCH_MSK_BITT_BRP 0x3f		/* low 6 bits of (brp - 1) go in BITT */
#define PCH_MSK_BRPE_BRPE 0x3c0		/* high 4 bits of (brp - 1) go in BRPE */
#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
#define PCH_COUNTER_LIMIT 10		/* polling retries for busy/drain waits */

#define PCH_CAN_CLK 50000000	/* Hz */


/*
 * Message object layout: objects 1..26 form the rx FIFO, 27..32 are
 * used for transmit. Object numbers are 1-based.
 */
#define PCH_RX_OBJ_NUM 26
#define PCH_TX_OBJ_NUM 6
#define PCH_RX_OBJ_START 1
#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)

#define PCH_FIFO_THRESH 16

/* bits of TREQ2 that correspond to the tx objects (27..32 -> bits 10..15) */
#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
	(PCH_RX_OBJ_END - 16))
97
/* Index into pch_can_regs.ifregs[]: IF1 is used for rx, IF2 for tx. */
enum pch_ifreg {
	PCH_RX_IFREG,
	PCH_TX_IFREG,
};
102
/* Last-error-code (LEC) values reported in the status register. */
enum pch_can_err {
	PCH_STUF_ERR = 1,
	PCH_FORM_ERR,
	PCH_ACK_ERR,
	PCH_BIT1_ERR,
	PCH_BIT0_ERR,
	PCH_CRC_ERR,
	PCH_LEC_ALL,	/* all LEC bits set: "no change" sentinel */
};
112
/*
 * Requests passed to pch_can_set_run_mode() (STOP/RUN) and
 * pch_can_set_int_enables() (ENABLE/DISABLE/ALL/NONE).
 */
enum pch_can_mode {
	PCH_CAN_ENABLE,
	PCH_CAN_DISABLE,
	PCH_CAN_ALL,
	PCH_CAN_NONE,
	PCH_CAN_STOP,
	PCH_CAN_RUN,
};
121
/* One IFx message-interface register set (memory-mapped). */
struct pch_can_if_regs {
	u32 creq;	/* command request: message object number, busy flag */
	u32 cmask;	/* command mask: fields to transfer */
	u32 mask1;	/* acceptance mask, low half */
	u32 mask2;	/* acceptance mask, high half + MDIR/MXTD */
	u32 id1;	/* arbitration, low half of identifier */
	u32 id2;	/* arbitration, high half + DIR/XTD/MSGVAL */
	u32 mcont;	/* message control (DLC, NEWDAT, interrupt enables, ...) */
	u32 data[4];	/* payload, two bytes per word */
	u32 rsv[13];
};
133
/* Full register map of the PCH CAN controller (memory-mapped, BAR 1). */
struct pch_can_regs {
	u32 cont;	/* control */
	u32 stat;	/* status (TXOK/RXOK/EWARN/EPASS/BOFF/LEC) */
	u32 errc;	/* error counters (TEC/REC/RP) */
	u32 bitt;	/* bit timing */
	u32 intr;	/* interrupt source (object number or 0x8000) */
	u32 opt;	/* test/option (silent, loopback) */
	u32 brpe;	/* baud rate prescaler extension */
	u32 reserve;
	struct pch_can_if_regs ifregs[2];	/* [0]=IF1/rx, [1]=IF2/tx */
	u32 reserve1[8];
	u32 treq1;	/* transmission request, objects 1..16 */
	u32 treq2;	/* transmission request, objects 17..32 */
	u32 reserve2[6];
	u32 data1;
	u32 data2;
	u32 reserve3[6];
	u32 canipend1;
	u32 canipend2;
	u32 reserve4[6];
	u32 canmval1;
	u32 canmval2;
	u32 reserve5[37];
	u32 srst;	/* software reset */
};
159
/* Per-device private state; must start with struct can_priv. */
struct pch_can_priv {
	struct can_priv can;			/* common CAN dev state (must be first) */
	struct pci_dev *dev;
	u32 tx_enable[PCH_TX_OBJ_END];		/* saved tx IE state across suspend */
	u32 rx_enable[PCH_TX_OBJ_END];		/* saved rx IE state across suspend */
	u32 rx_link[PCH_TX_OBJ_END];		/* saved rx FIFO-link (EOB) state */
	u32 int_enables;			/* saved CONT interrupt enables */
	struct net_device *ndev;
	struct pch_can_regs __iomem *regs;
	struct napi_struct napi;
	int tx_obj;				/* next tx message object to use */
	int use_msi;				/* non-zero if MSI was enabled */
};
173
/* Bit-timing limits of the controller, used by the CAN core to
 * compute BITT/BRPE values from the requested bitrate. */
static const struct can_bittiming_const pch_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* with BRPE extension */
	.brp_inc = 1,
};
185
/* PCI IDs handled by this driver: Intel EG20T PCH CAN controller. */
static const struct pci_device_id pch_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
191
192static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
193{
194 iowrite32(ioread32(addr) | mask, addr);
195}
196
197static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
198{
199 iowrite32(ioread32(addr) & ~mask, addr);
200}
201
202static void pch_can_set_run_mode(struct pch_can_priv *priv,
203 enum pch_can_mode mode)
204{
205 switch (mode) {
206 case PCH_CAN_RUN:
207 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
208 break;
209
210 case PCH_CAN_STOP:
211 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
212 break;
213
214 default:
215 netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
216 break;
217 }
218}
219
220static void pch_can_set_optmode(struct pch_can_priv *priv)
221{
222 u32 reg_val = ioread32(&priv->regs->opt);
223
224 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
225 reg_val |= PCH_OPT_SILENT;
226
227 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
228 reg_val |= PCH_OPT_LBACK;
229
230 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
231 iowrite32(reg_val, &priv->regs->opt);
232}
233
234static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
235{
236 int counter = PCH_COUNTER_LIMIT;
237 u32 ifx_creq;
238
239 iowrite32(num, creq_addr);
240 while (counter) {
241 ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
242 if (!ifx_creq)
243 break;
244 counter--;
245 udelay(1);
246 }
247 if (!counter)
248 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
249}
250
251static void pch_can_set_int_enables(struct pch_can_priv *priv,
252 enum pch_can_mode interrupt_no)
253{
254 switch (interrupt_no) {
255 case PCH_CAN_DISABLE:
256 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
257 break;
258
259 case PCH_CAN_ALL:
260 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
261 break;
262
263 case PCH_CAN_NONE:
264 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
265 break;
266
267 default:
268 netdev_err(priv->ndev, "Invalid interrupt number.\n");
269 break;
270 }
271}
272
273static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
274 int set, enum pch_ifreg dir)
275{
276 u32 ie;
277
278 if (dir)
279 ie = PCH_IF_MCONT_TXIE;
280 else
281 ie = PCH_IF_MCONT_RXIE;
282
283
284 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
285 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
286
287
288 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
289 &priv->regs->ifregs[dir].cmask);
290
291 if (set) {
292
293 pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
294 pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
295 } else {
296
297 pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
298 pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
299 }
300
301 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
302}
303
304static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
305{
306 int i;
307
308
309 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
310 pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
311}
312
313static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
314{
315 int i;
316
317
318 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
319 pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
320}
321
322static u32 pch_can_int_pending(struct pch_can_priv *priv)
323{
324 return ioread32(&priv->regs->intr) & 0xffff;
325}
326
327static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
328{
329 int i;
330
331 for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
332 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
333 iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
334 iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
335 iowrite32(0x0, &priv->regs->ifregs[0].id1);
336 iowrite32(0x0, &priv->regs->ifregs[0].id2);
337 iowrite32(0x0, &priv->regs->ifregs[0].mcont);
338 iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
339 iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
340 iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
341 iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
342 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
343 PCH_CMASK_ARB | PCH_CMASK_CTRL,
344 &priv->regs->ifregs[0].cmask);
345 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
346 }
347}
348
/*
 * Configure the message objects: objects 1..26 become a chained rx
 * FIFO (EOB set only on the last one), objects 27..32 become tx
 * objects (DIR set, each its own end-of-buffer).
 */
static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
	int i;

	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Read the object into IF1. */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);

		/* Accept any identifier. */
		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);

		/* Use the acceptance mask for filtering. */
		pch_can_bit_set(&priv->regs->ifregs[0].mcont,
				PCH_IF_MCONT_UMASK);

		/* Only the last rx object terminates the FIFO chain. */
		if (i == PCH_RX_OBJ_END)
			pch_can_bit_set(&priv->regs->ifregs[0].mcont,
					PCH_IF_MCONT_EOB);
		else
			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
					  PCH_IF_MCONT_EOB);

		/* All-zero mask: match everything. */
		iowrite32(0, &priv->regs->ifregs[0].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
				  0x1fff | PCH_MASK2_MDIR_MXTD);

		/* Write mask, arbitration and control back to the RAM. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}

	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		/* Read the object into IF2. */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);

		/* Transmit direction; identifier filled in per frame. */
		iowrite32(0x0, &priv->regs->ifregs[1].id1);
		iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);

		/* Tx objects are not chained. */
		iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
			  &priv->regs->ifregs[1].mcont);

		iowrite32(0, &priv->regs->ifregs[1].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);

		/* Write mask, arbitration and control back to the RAM. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
	}
}
404
/* Bring the controller into a known configured state (still stopped). */
static void pch_can_init(struct pch_can_priv *priv)
{
	/* Stopping the Can device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Clearing all the message object buffers. */
	pch_can_clear_if_buffers(priv);

	/* Configuring the respective message object as either rx/tx object. */
	pch_can_config_rx_tx_buffers(priv);

	/* Enabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_ALL);
}
419
/* Quiesce the controller: stop it, mask interrupts, invalidate all
 * rx and tx message objects. */
static void pch_can_release(struct pch_can_priv *priv)
{
	/* Stooping the CAN device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Disabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_NONE);

	/* Disabling all the receive object. */
	pch_can_set_rx_all(priv, 0);

	/* Disabling all the transmit object. */
	pch_can_set_tx_all(priv, 0);
}
434
435
/*
 * Clear the pending interrupt of message object @mask (an object
 * number, despite the name): drop NEWDAT/INTPND for rx objects, and
 * additionally the transmit request for tx objects.
 */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
	/* Rx object: write arbitration + control back with flags cleared. */
	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[0].cmask);

		/* Clear the Dir bit (receive direction). */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clear NewDat and IntPnd. */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
		/* Tx object: same write-back through IF2. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[1].cmask);

		/* Set the Dir bit (transmit) and a full standard-id field. */
		pch_can_bit_set(&priv->regs->ifregs[1].id2,
				PCH_ID2_DIR | (0x7ff << 2));
		iowrite32(0x0, &priv->regs->ifregs[1].id1);

		/* Clear NewDat, TxRqst & IntPnd. */
		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
				  PCH_IF_MCONT_TXRQXT);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
	}
}
471
/* Pulse the software-reset register to reset the CAN block. */
static void pch_can_reset(struct pch_can_priv *priv)
{
	/* write to sw reset register */
	iowrite32(1, &priv->regs->srst);
	iowrite32(0, &priv->regs->srst);
}
478
479static void pch_can_error(struct net_device *ndev, u32 status)
480{
481 struct sk_buff *skb;
482 struct pch_can_priv *priv = netdev_priv(ndev);
483 struct can_frame *cf;
484 u32 errc, lec;
485 struct net_device_stats *stats = &(priv->ndev->stats);
486 enum can_state state = priv->can.state;
487
488 skb = alloc_can_err_skb(ndev, &cf);
489 if (!skb)
490 return;
491
492 if (status & PCH_BUS_OFF) {
493 pch_can_set_tx_all(priv, 0);
494 pch_can_set_rx_all(priv, 0);
495 state = CAN_STATE_BUS_OFF;
496 cf->can_id |= CAN_ERR_BUSOFF;
497 priv->can.can_stats.bus_off++;
498 can_bus_off(ndev);
499 }
500
501 errc = ioread32(&priv->regs->errc);
502
503 if (status & PCH_EWARN) {
504 state = CAN_STATE_ERROR_WARNING;
505 priv->can.can_stats.error_warning++;
506 cf->can_id |= CAN_ERR_CRTL;
507 if (((errc & PCH_REC) >> 8) > 96)
508 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
509 if ((errc & PCH_TEC) > 96)
510 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
511 netdev_dbg(ndev,
512 "%s -> Error Counter is more than 96.\n", __func__);
513 }
514
515 if (status & PCH_EPASSIV) {
516 priv->can.can_stats.error_passive++;
517 state = CAN_STATE_ERROR_PASSIVE;
518 cf->can_id |= CAN_ERR_CRTL;
519 if (errc & PCH_RP)
520 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
521 if ((errc & PCH_TEC) > 127)
522 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
523 netdev_dbg(ndev,
524 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
525 }
526
527 lec = status & PCH_LEC_ALL;
528 switch (lec) {
529 case PCH_STUF_ERR:
530 cf->data[2] |= CAN_ERR_PROT_STUFF;
531 priv->can.can_stats.bus_error++;
532 stats->rx_errors++;
533 break;
534 case PCH_FORM_ERR:
535 cf->data[2] |= CAN_ERR_PROT_FORM;
536 priv->can.can_stats.bus_error++;
537 stats->rx_errors++;
538 break;
539 case PCH_ACK_ERR:
540 cf->can_id |= CAN_ERR_ACK;
541 priv->can.can_stats.bus_error++;
542 stats->rx_errors++;
543 break;
544 case PCH_BIT1_ERR:
545 case PCH_BIT0_ERR:
546 cf->data[2] |= CAN_ERR_PROT_BIT;
547 priv->can.can_stats.bus_error++;
548 stats->rx_errors++;
549 break;
550 case PCH_CRC_ERR:
551 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
552 priv->can.can_stats.bus_error++;
553 stats->rx_errors++;
554 break;
555 case PCH_LEC_ALL:
556 break;
557 }
558
559 cf->data[6] = errc & PCH_TEC;
560 cf->data[7] = (errc & PCH_REC) >> 8;
561
562 priv->can.state = state;
563 netif_receive_skb(skb);
564
565 stats->rx_packets++;
566 stats->rx_bytes += cf->can_dlc;
567}
568
569static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
570{
571 struct net_device *ndev = (struct net_device *)dev_id;
572 struct pch_can_priv *priv = netdev_priv(ndev);
573
574 if (!pch_can_int_pending(priv))
575 return IRQ_NONE;
576
577 pch_can_set_int_enables(priv, PCH_CAN_NONE);
578 napi_schedule(&priv->napi);
579 return IRQ_HANDLED;
580}
581
582static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
583{
584 if (obj_id < PCH_FIFO_THRESH) {
585 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
586 PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
587
588
589 pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
590
591
592 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
593 PCH_IF_MCONT_INTPND);
594 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
595 } else if (obj_id > PCH_FIFO_THRESH) {
596 pch_can_int_clr(priv, obj_id);
597 } else if (obj_id == PCH_FIFO_THRESH) {
598 int cnt;
599 for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
600 pch_can_int_clr(priv, cnt + 1);
601 }
602}
603
/*
 * An rx message object was overwritten before it was read: clear the
 * MSGLOST flag in the hardware and report an rx-overflow error frame
 * to the stack.
 */
static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
	/* Clear MSGLOST in the cached IF1 copy, then write it back. */
	pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
			  PCH_IF_MCONT_MSGLOST);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	stats->rx_over_errors++;
	stats->rx_errors++;

	netif_receive_skb(skb);
}
629
630static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
631{
632 u32 reg;
633 canid_t id;
634 int rcv_pkts = 0;
635 struct sk_buff *skb;
636 struct can_frame *cf;
637 struct pch_can_priv *priv = netdev_priv(ndev);
638 struct net_device_stats *stats = &(priv->ndev->stats);
639 int i;
640 u32 id2;
641 u16 data_reg;
642
643 do {
644
645 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
646 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
647
648
649 reg = ioread32(&priv->regs->ifregs[0].mcont);
650
651 if (reg & PCH_IF_MCONT_EOB)
652 break;
653
654
655 if (reg & PCH_IF_MCONT_MSGLOST) {
656 pch_can_rx_msg_lost(ndev, obj_num);
657 rcv_pkts++;
658 quota--;
659 obj_num++;
660 continue;
661 } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
662 obj_num++;
663 continue;
664 }
665
666 skb = alloc_can_skb(priv->ndev, &cf);
667 if (!skb) {
668 netdev_err(ndev, "alloc_can_skb Failed\n");
669 return rcv_pkts;
670 }
671
672
673 id2 = ioread32(&priv->regs->ifregs[0].id2);
674 if (id2 & PCH_ID2_XTD) {
675 id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
676 id |= (((id2) & 0x1fff) << 16);
677 cf->can_id = id | CAN_EFF_FLAG;
678 } else {
679 id = (id2 >> 2) & CAN_SFF_MASK;
680 cf->can_id = id;
681 }
682
683 if (id2 & PCH_ID2_DIR)
684 cf->can_id |= CAN_RTR_FLAG;
685
686 cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
687 ifregs[0].mcont)) & 0xF);
688
689 for (i = 0; i < cf->can_dlc; i += 2) {
690 data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
691 cf->data[i] = data_reg;
692 cf->data[i + 1] = data_reg >> 8;
693 }
694
695 netif_receive_skb(skb);
696 rcv_pkts++;
697 stats->rx_packets++;
698 quota--;
699 stats->rx_bytes += cf->can_dlc;
700
701 pch_fifo_thresh(priv, obj_num);
702 obj_num++;
703 } while (quota > 0);
704
705 return rcv_pkts;
706}
707
/*
 * A tx message object finished sending: loop the echo skb back,
 * acknowledge the object in hardware and update tx statistics.
 * @int_stat is the object number (PCH_TX_OBJ_START..PCH_TX_OBJ_END).
 */
static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	u32 dlc;

	/* Echo index: tx objects map to 0..PCH_TX_OBJ_NUM-1. */
	can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
	iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
		  &priv->regs->ifregs[1].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
	dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
			  PCH_IF_MCONT_DLC);
	stats->tx_bytes += dlc;
	stats->tx_packets++;
	/* Queue was stopped in pch_xmit when the last object was used. */
	if (int_stat == PCH_TX_OBJ_END)
		netif_wake_queue(ndev);
}
725
/*
 * NAPI poll: service one pending interrupt source — a status/error
 * condition, an rx FIFO object, or a tx completion — then re-enable
 * interrupts. Returns the number of quota units consumed.
 *
 * NOTE(review): napi_complete() is called even when the full quota was
 * consumed; confirm this is acceptable for this NAPI usage.
 */
static int pch_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct pch_can_priv *priv = netdev_priv(ndev);
	u32 int_stat;
	u32 reg_stat;
	int quota_save = quota;

	int_stat = pch_can_int_pending(priv);
	if (!int_stat)
		goto end;

	if (int_stat == PCH_STATUS_INT) {
		reg_stat = ioread32(&priv->regs->stat);

		/* Report errors unless LEC says "no change" (all bits set). */
		if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
		    ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
			pch_can_error(ndev, reg_stat);
			quota--;
		}

		/* Acknowledge successful tx/rx status bits. */
		if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
			pch_can_bit_clear(&priv->regs->stat,
					  reg_stat & (PCH_TX_OK | PCH_RX_OK));

		/* Re-read: a message-object interrupt may now be pending. */
		int_stat = pch_can_int_pending(priv);
	}

	if (quota == 0)
		goto end;

	/* Message-object interrupts carry the object number directly. */
	if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
		quota -= pch_can_rx_normal(ndev, int_stat, quota);
	} else if ((int_stat >= PCH_TX_OBJ_START) &&
		   (int_stat <= PCH_TX_OBJ_END)) {
		/* Handle transmission interrupt. */
		pch_can_tx_complete(ndev, int_stat);
	}

end:
	napi_complete(napi);
	pch_can_set_int_enables(priv, PCH_CAN_ALL);

	return quota_save - quota;
}
771
772static int pch_set_bittiming(struct net_device *ndev)
773{
774 struct pch_can_priv *priv = netdev_priv(ndev);
775 const struct can_bittiming *bt = &priv->can.bittiming;
776 u32 canbit;
777 u32 bepe;
778
779
780 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
781
782 canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
783 canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
784 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
785 canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
786 bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
787 iowrite32(canbit, &priv->regs->bitt);
788 iowrite32(bepe, &priv->regs->brpe);
789 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
790
791 return 0;
792}
793
794static void pch_can_start(struct net_device *ndev)
795{
796 struct pch_can_priv *priv = netdev_priv(ndev);
797
798 if (priv->can.state != CAN_STATE_STOPPED)
799 pch_can_reset(priv);
800
801 pch_set_bittiming(ndev);
802 pch_can_set_optmode(priv);
803
804 pch_can_set_tx_all(priv, 1);
805 pch_can_set_rx_all(priv, 1);
806
807
808 pch_can_set_run_mode(priv, PCH_CAN_RUN);
809
810 priv->can.state = CAN_STATE_ERROR_ACTIVE;
811
812 return;
813}
814
815static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
816{
817 int ret = 0;
818
819 switch (mode) {
820 case CAN_MODE_START:
821 pch_can_start(ndev);
822 netif_wake_queue(ndev);
823 break;
824 default:
825 ret = -EOPNOTSUPP;
826 break;
827 }
828
829 return ret;
830}
831
/*
 * ndo_open: grab the (shared) IRQ, open the CAN device, initialise and
 * start the controller, then enable NAPI and the tx queue.
 */
static int pch_can_open(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	int retval;

	/* Registering the interrupt. */
	retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
			     ndev->name, ndev);
	if (retval) {
		netdev_err(ndev, "request_irq failed.\n");
		goto req_irq_err;
	}

	/* Open common can device */
	retval = open_candev(ndev);
	if (retval) {
		netdev_err(ndev, "open_candev() failed %d\n", retval);
		goto err_open_candev;
	}

	pch_can_init(priv);
	pch_can_start(ndev);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_open_candev:
	free_irq(priv->dev->irq, ndev);
req_irq_err:
	/* NOTE(review): pch_can_release() is also reached when request_irq
	 * itself failed; it only quiesces hardware, but confirm this is the
	 * intended error path. */
	pch_can_release(priv);

	return retval;
}
866
/* ndo_stop: reverse of pch_can_open — stop queue/NAPI, quiesce the
 * hardware, release the IRQ and close the CAN device. */
static int pch_close(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	pch_can_release(priv);
	free_irq(priv->dev->irq, ndev);
	close_candev(ndev);
	priv->can.state = CAN_STATE_STOPPED;
	return 0;
}
879
/*
 * ndo_start_xmit: load the frame into the next free tx message object
 * (round-robin over PCH_TX_OBJ_START..PCH_TX_OBJ_END) and request
 * transmission. The skb is queued for echo; the tx queue is stopped
 * when wrapping while the hardware still has pending tx requests.
 */
static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	int tx_obj_no;
	int i;
	u32 id2;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	tx_obj_no = priv->tx_obj;
	if (priv->tx_obj == PCH_TX_OBJ_END) {
		/* Wrapping: stop the queue if tx objects are still busy;
		 * pch_can_tx_complete() wakes it again. */
		if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
			netif_stop_queue(ndev);

		priv->tx_obj = PCH_TX_OBJ_START;
	} else {
		priv->tx_obj++;
	}

	/* Select all fields of the message object for the write-back. */
	pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);

	/* Program the identifier (extended or standard). */
	if (cf->can_id & CAN_EFF_FLAG) {
		iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
		id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
	} else {
		iowrite32(0, &priv->regs->ifregs[1].id1);
		id2 = (cf->can_id & CAN_SFF_MASK) << 2;
	}

	id2 |= PCH_ID_MSGVAL;

	/* Data frames transmit (DIR set); RTR frames leave DIR clear. */
	if (!(cf->can_id & CAN_RTR_FLAG))
		id2 |= PCH_ID2_DIR;

	iowrite32(id2, &priv->regs->ifregs[1].id2);

	/* Copy the payload, two bytes per 16-bit data word. */
	for (i = 0; i < cf->can_dlc; i += 2) {
		iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
			  &priv->regs->ifregs[1].data[i / 2]);
	}

	can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);

	/* Set the DLC and kick the transmit request. */
	iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
		  PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);

	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);

	return NETDEV_TX_OK;
}
937
/* Network device callbacks. */
static const struct net_device_ops pch_can_netdev_ops = {
	.ndo_open = pch_can_open,
	.ndo_stop = pch_close,
	.ndo_start_xmit = pch_xmit,
	.ndo_change_mtu = can_change_mtu,
};
944
/* PCI remove: unwind pch_can_probe in reverse order. */
static void pch_can_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(ndev);

	unregister_candev(priv->ndev);
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* NOTE(review): the device registers are written (reset) after
	 * pci_disable_device() and regions are already released — confirm
	 * this ordering is intentional. */
	pch_can_reset(priv);
	pci_iounmap(pdev, priv->regs);
	free_candev(priv->ndev);
}
959
960#ifdef CONFIG_PM
/* Restore the interrupt enables saved by pch_can_get_int_enables()
 * (shifted back into CONT bit positions 1..3). */
static void pch_can_set_int_custom(struct pch_can_priv *priv)
{
	/* Clearing the IE, SIE and EIE bits of Can control register. */
	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);

	/* Appropriately setting them. */
	pch_can_bit_set(&priv->regs->cont,
			((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
}
970
971
972static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
973{
974
975 return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
976}
977
978static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
979 enum pch_ifreg dir)
980{
981 u32 ie, enable;
982
983 if (dir)
984 ie = PCH_IF_MCONT_RXIE;
985 else
986 ie = PCH_IF_MCONT_TXIE;
987
988 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
989 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
990
991 if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
992 ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
993 enable = 1;
994 else
995 enable = 0;
996
997 return enable;
998}
999
/*
 * Link (@set != 0) or unlink an rx object into the FIFO chain. A
 * linked object has EOB *cleared* — so the logic below is intentionally
 * inverted: set -> clear EOB, unset -> set EOB.
 */
static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
				       u32 buffer_num, int set)
{
	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	if (set)
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_EOB);
	else
		pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);

	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}
1015
1016static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
1017{
1018 u32 link;
1019
1020 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
1021 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1022
1023 if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
1024 link = 0;
1025 else
1026 link = 1;
1027 return link;
1028}
1029
1030static int pch_can_get_buffer_status(struct pch_can_priv *priv)
1031{
1032 return (ioread32(&priv->regs->treq1) & 0xffff) |
1033 (ioread32(&priv->regs->treq2) << 16);
1034}
1035
/*
 * PCI suspend: stop the controller, wait (bounded) for in-flight
 * transmissions to drain, snapshot interrupt-enable and FIFO-link
 * state for resume, then power the device down.
 */
static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	int retval;
	u32 buf_stat;	/* Variable for reading the transmit buffer status. */
	int counter = PCH_COUNTER_LIMIT;

	struct net_device *dev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(dev);

	/* Stop the CAN controller */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Indicate that we are aboutto/in suspend */
	priv->can.state = CAN_STATE_STOPPED;

	/* Waiting for all transmission to complete. */
	while (counter) {
		buf_stat = pch_can_get_buffer_status(priv);
		if (!buf_stat)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);

	/* Save interrupt configuration and then disable them */
	priv->int_enables = pch_can_get_int_enables(priv);
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Save Tx buffer enable state */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_TX_IFREG);

	/* Disable all Transmit buffers */
	pch_can_set_tx_all(priv, 0);

	/* Save Rx buffer enable state */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_RX_IFREG);
		priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
	}

	/* Disable all Receive buffers */
	pch_can_set_rx_all(priv, 0);
	retval = pci_save_state(pdev);
	if (retval) {
		dev_err(&pdev->dev, "pci_save_state failed.\n");
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	return retval;
}
1095
/*
 * PCI resume: power the device back up, reprogram timing/option/buffer
 * configuration and restore the per-object enable and FIFO-link state
 * saved by pch_can_suspend(), then restart the controller.
 */
static int pch_can_resume(struct pci_dev *pdev)
{
	int i;
	int retval;
	struct net_device *dev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "pci_enable_device failed.\n");
		return retval;
	}

	pci_enable_wake(pdev, PCI_D3hot, 0);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Disabling all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Setting the CAN device in Stop Mode. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Configuring the transmit and receive buffers. */
	pch_can_config_rx_tx_buffers(priv);

	/* Restore the CAN state */
	pch_set_bittiming(dev);

	/* Listen/Active */
	pch_can_set_optmode(priv);

	/* Enabling the transmit buffer. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);

	/* Configuring the receive buffer and enabling them. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Restore buffer link */
		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);

		/* Restore buffer enables */
		pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
	}

	/* Enable CAN Interrupts */
	pch_can_set_int_custom(priv);

	/* Restore Run Mode */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	return retval;
}
1151#else
1152#define pch_can_suspend NULL
1153#define pch_can_resume NULL
1154#endif
1155
1156static int pch_can_get_berr_counter(const struct net_device *dev,
1157 struct can_berr_counter *bec)
1158{
1159 struct pch_can_priv *priv = netdev_priv(dev);
1160 u32 errc = ioread32(&priv->regs->errc);
1161
1162 bec->txerr = errc & PCH_TEC;
1163 bec->rxerr = (errc & PCH_REC) >> 8;
1164
1165 return 0;
1166}
1167
/*
 * PCI probe: enable the device, map BAR 1 (the CAN register block),
 * allocate and configure the CAN netdev (with NAPI and optional MSI),
 * and register it with the CAN core.
 */
static int pch_can_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct net_device *ndev;
	struct pch_can_priv *priv;
	int rc;
	void __iomem *addr;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
		goto probe_exit_endev;
	}

	rc = pci_request_regions(pdev, KBUILD_MODNAME);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
		goto probe_exit_pcireq;
	}

	/* The CAN registers live behind BAR 1. */
	addr = pci_iomap(pdev, 1, 0);
	if (!addr) {
		rc = -EIO;
		dev_err(&pdev->dev, "Failed pci_iomap\n");
		goto probe_exit_ipmap;
	}

	/* One echo slot per tx message object. */
	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
	if (!ndev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "Failed alloc_candev\n");
		goto probe_exit_alloc_candev;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->regs = addr;
	priv->dev = pdev;
	priv->can.bittiming_const = &pch_can_bittiming_const;
	priv->can.do_set_mode = pch_can_do_set_mode;
	priv->can.do_get_berr_counter = pch_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_LOOPBACK;
	priv->tx_obj = PCH_TX_OBJ_START;	/* Point head of Tx Obj */

	ndev->irq = pdev->irq;
	ndev->flags |= IFF_ECHO;	/* we support local echo */

	pci_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &pch_can_netdev_ops;
	priv->can.clock.freq = PCH_CAN_CLK;	/* Hz */

	netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);

	/* MSI is optional; fall back to legacy interrupts if unavailable. */
	rc = pci_enable_msi(priv->dev);
	if (rc) {
		netdev_err(ndev, "PCH CAN opened without MSI\n");
		priv->use_msi = 0;
	} else {
		netdev_err(ndev, "PCH CAN opened with MSI\n");
		pci_set_master(pdev);
		priv->use_msi = 1;
	}

	rc = register_candev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
		goto probe_exit_reg_candev;
	}

	return 0;

probe_exit_reg_candev:
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	free_candev(ndev);
probe_exit_alloc_candev:
	pci_iounmap(pdev, addr);
probe_exit_ipmap:
	pci_release_regions(pdev);
probe_exit_pcireq:
	pci_disable_device(pdev);
probe_exit_endev:
	return rc;
}
1254
/* PCI driver glue; suspend/resume are NULL when CONFIG_PM is off. */
static struct pci_driver pch_can_pci_driver = {
	.name = "pch_can",
	.id_table = pch_pci_tbl,
	.probe = pch_can_probe,
	.remove = pch_can_remove,
	.suspend = pch_can_suspend,
	.resume = pch_can_resume,
};

module_pci_driver(pch_can_pci_driver);

MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.94");
1269