// SPDX-License-Identifier: GPL-2.0
/*
 * Intel EG20T PCH CAN (Controller Area Network) driver.
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>

#define PCH_CTRL_INIT		BIT(0)
#define PCH_CTRL_IE		BIT(1)
#define PCH_CTRL_IE_SIE_EIE	(BIT(3) | BIT(2) | BIT(1))
#define PCH_CTRL_CCE		BIT(6)
#define PCH_CTRL_OPT		BIT(7)
#define PCH_OPT_SILENT		BIT(3)
#define PCH_OPT_LBACK		BIT(4)

#define PCH_CMASK_RX_TX_SET	0x00f3
#define PCH_CMASK_RX_TX_GET	0x0073
#define PCH_CMASK_ALL		0xff
#define PCH_CMASK_NEWDAT	BIT(2)
#define PCH_CMASK_CLRINTPND	BIT(3)
#define PCH_CMASK_CTRL		BIT(4)
#define PCH_CMASK_ARB		BIT(5)
#define PCH_CMASK_MASK		BIT(6)
#define PCH_CMASK_RDWR		BIT(7)
#define PCH_IF_MCONT_NEWDAT	BIT(15)
#define PCH_IF_MCONT_MSGLOST	BIT(14)
#define PCH_IF_MCONT_INTPND	BIT(13)
#define PCH_IF_MCONT_UMASK	BIT(12)
#define PCH_IF_MCONT_TXIE	BIT(11)
#define PCH_IF_MCONT_RXIE	BIT(10)
#define PCH_IF_MCONT_RMTEN	BIT(9)
#define PCH_IF_MCONT_TXRQXT	BIT(8)
#define PCH_IF_MCONT_EOB	BIT(7)
#define PCH_IF_MCONT_DLC	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
#define PCH_MASK2_MDIR_MXTD	(BIT(14) | BIT(15))
#define PCH_ID2_DIR		BIT(13)
#define PCH_ID2_XTD		BIT(14)
#define PCH_ID_MSGVAL		BIT(15)
#define PCH_IF_CREQ_BUSY	BIT(15)

#define PCH_STATUS_INT		0x8000
#define PCH_RP			0x00008000
#define PCH_REC			0x00007f00
#define PCH_TEC			0x000000ff

#define PCH_TX_OK		BIT(3)
#define PCH_RX_OK		BIT(4)
#define PCH_EPASSIV		BIT(5)
#define PCH_EWARN		BIT(6)
#define PCH_BUS_OFF		BIT(7)

/* Bit timing register field positions. */
#define PCH_BIT_BRP_SHIFT	0
#define PCH_BIT_SJW_SHIFT	6
#define PCH_BIT_TSEG1_SHIFT	8
#define PCH_BIT_TSEG2_SHIFT	12
#define PCH_BIT_BRPE_BRPE_SHIFT	6

#define PCH_MSK_BITT_BRP	0x3f
#define PCH_MSK_BRPE_BRPE	0x3c0
#define PCH_MSK_CTRL_IE_SIE_EIE	0x07
#define PCH_COUNTER_LIMIT	10

#define PCH_CAN_CLK		50000000	/* 50 MHz */

/*
 * Message object allocation: objects 1..PCH_RX_OBJ_END are used for
 * reception, the remaining objects up to PCH_TX_OBJ_END for transmission.
 */
#define PCH_RX_OBJ_NUM		26
#define PCH_TX_OBJ_NUM		6
#define PCH_RX_OBJ_START	1
#define PCH_RX_OBJ_END		PCH_RX_OBJ_NUM
#define PCH_TX_OBJ_START	(PCH_RX_OBJ_END + 1)
#define PCH_TX_OBJ_END		(PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)

#define PCH_FIFO_THRESH		16

/* Transmit message objects as they appear in the TREQ2 register (objects 17..32). */
#define PCH_TREQ2_TX_MASK	(((1 << PCH_TX_OBJ_NUM) - 1) << \
				 (PCH_RX_OBJ_END - 16))

enum pch_ifreg {
	PCH_RX_IFREG,
	PCH_TX_IFREG,
};

enum pch_can_err {
	PCH_STUF_ERR = 1,
	PCH_FORM_ERR,
	PCH_ACK_ERR,
	PCH_BIT1_ERR,
	PCH_BIT0_ERR,
	PCH_CRC_ERR,
	PCH_LEC_ALL,
};

enum pch_can_mode {
	PCH_CAN_ENABLE,
	PCH_CAN_DISABLE,
	PCH_CAN_ALL,
	PCH_CAN_NONE,
	PCH_CAN_STOP,
	PCH_CAN_RUN,
};

struct pch_can_if_regs {
	u32 creq;
	u32 cmask;
	u32 mask1;
	u32 mask2;
	u32 id1;
	u32 id2;
	u32 mcont;
	u32 data[4];
	u32 rsv[13];
};

struct pch_can_regs {
	u32 cont;
	u32 stat;
	u32 errc;
	u32 bitt;
	u32 intr;
	u32 opt;
	u32 brpe;
	u32 reserve;
	struct pch_can_if_regs ifregs[2];
	u32 reserve1[8];
	u32 treq1;
	u32 treq2;
	u32 reserve2[6];
	u32 data1;
	u32 data2;
	u32 reserve3[6];
	u32 canipend1;
	u32 canipend2;
	u32 reserve4[6];
	u32 canmval1;
	u32 canmval2;
	u32 reserve5[37];
	u32 srst;
};

struct pch_can_priv {
	struct can_priv can;
	struct pci_dev *dev;
	u32 tx_enable[PCH_TX_OBJ_END];
	u32 rx_enable[PCH_TX_OBJ_END];
	u32 rx_link[PCH_TX_OBJ_END];
	u32 int_enables;
	struct net_device *ndev;
	struct pch_can_regs __iomem *regs;
	struct napi_struct napi;
	int tx_obj;
	int use_msi;
};

static const struct can_bittiming_const pch_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct pci_device_id pch_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, pch_pci_tbl);

static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
{
	iowrite32(ioread32(addr) | mask, addr);
}

static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
{
	iowrite32(ioread32(addr) & ~mask, addr);
}

static void pch_can_set_run_mode(struct pch_can_priv *priv,
				 enum pch_can_mode mode)
{
	switch (mode) {
	case PCH_CAN_RUN:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
		break;

	case PCH_CAN_STOP:
		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
		break;

	default:
		netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
		break;
	}
}

static void pch_can_set_optmode(struct pch_can_priv *priv)
{
	u32 reg_val = ioread32(&priv->regs->opt);

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		reg_val |= PCH_OPT_SILENT;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_val |= PCH_OPT_LBACK;

	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
	iowrite32(reg_val, &priv->regs->opt);
}

static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
{
	int counter = PCH_COUNTER_LIMIT;
	u32 ifx_creq;

	/* Start the transfer and wait for the IFx busy flag to clear. */
	iowrite32(num, creq_addr);
	while (counter) {
		ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
		if (!ifx_creq)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		pr_err("%s: IFx busy flag did not clear.\n", __func__);
}

static void pch_can_set_int_enables(struct pch_can_priv *priv,
				    enum pch_can_mode interrupt_no)
{
	switch (interrupt_no) {
	case PCH_CAN_DISABLE:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
		break;

	case PCH_CAN_ALL:
		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
		break;

	case PCH_CAN_NONE:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
		break;

	default:
		netdev_err(priv->ndev, "Invalid interrupt number.\n");
		break;
	}
}

static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
			     int set, enum pch_ifreg dir)
{
	u32 ie;

	if (dir)
		ie = PCH_IF_MCONT_TXIE;
	else
		ie = PCH_IF_MCONT_RXIE;

	/* Read the message object into the interface registers. */
	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);

	/* Request write access to the arbitration and control bits. */
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[dir].cmask);

	if (set) {
		/* Set the interrupt enable and message valid bits. */
		pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
		pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
	} else {
		/* Clear the interrupt enable and message valid bits. */
		pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
		pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
	}

	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
}

static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
{
	int i;

	/* Enable or disable all receive message objects. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
}

static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
{
	int i;

	/* Enable or disable all transmit message objects. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
}

static u32 pch_can_int_pending(struct pch_can_priv *priv)
{
	return ioread32(&priv->regs->intr) & 0xffff;
}

static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
{
	int i;

	for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
		iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
		iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);
		iowrite32(0x0, &priv->regs->ifregs[0].mcont);
		iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
			  &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}
}

static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
	int i;

	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);

		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);

		pch_can_bit_set(&priv->regs->ifregs[0].mcont,
				PCH_IF_MCONT_UMASK);

		/* Set EOB on the last receive object only. */
		if (i == PCH_RX_OBJ_END)
			pch_can_bit_set(&priv->regs->ifregs[0].mcont,
					PCH_IF_MCONT_EOB);
		else
			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
					  PCH_IF_MCONT_EOB);

		iowrite32(0, &priv->regs->ifregs[0].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
				  0x1fff | PCH_MASK2_MDIR_MXTD);

		/* Write the configuration back to message RAM. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}

	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);

		/* Mark the object as a transmit object (DIR bit set). */
		iowrite32(0x0, &priv->regs->ifregs[1].id1);
		iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);

		/* Set EOB and UMASK for the transmit object. */
		iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
			  &priv->regs->ifregs[1].mcont);

		iowrite32(0, &priv->regs->ifregs[1].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);

		/* Write the configuration back to message RAM. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
	}
}

static void pch_can_init(struct pch_can_priv *priv)
{
	/* Stop the controller while it is being configured. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Clear all message object buffers. */
	pch_can_clear_if_buffers(priv);

	/* Configure the receive and transmit message objects. */
	pch_can_config_rx_tx_buffers(priv);

	/* Enable all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_ALL);
}

static void pch_can_release(struct pch_can_priv *priv)
{
	/* Stop the controller. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Disable all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_NONE);

	/* Disable all receive message objects. */
	pch_can_set_rx_all(priv, 0);

	/* Disable all transmit message objects. */
	pch_can_set_tx_all(priv, 0);
}

/* Clear the pending interrupt and new-data flags of a message object. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
		/* Receive message object. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[0].cmask);

		/* Clear the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clear the NewDat and IntPnd bits. */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
		/* Transmit message object. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[1].cmask);

		/* Set the Dir bit and the standard ID field. */
		pch_can_bit_set(&priv->regs->ifregs[1].id2,
				PCH_ID2_DIR | (0x7ff << 2));
		iowrite32(0x0, &priv->regs->ifregs[1].id1);

		/* Clear the NewDat, IntPnd and TxRqst bits. */
		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
				  PCH_IF_MCONT_TXRQXT);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
	}
}

static void pch_can_reset(struct pch_can_priv *priv)
{
	/* Pulse the software reset register. */
	iowrite32(1, &priv->regs->srst);
	iowrite32(0, &priv->regs->srst);
}

static void pch_can_error(struct net_device *ndev, u32 status)
{
	struct sk_buff *skb;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	u32 errc, lec;
	struct net_device_stats *stats = &(priv->ndev->stats);
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	if (status & PCH_BUS_OFF) {
		pch_can_set_tx_all(priv, 0);
		pch_can_set_rx_all(priv, 0);
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		priv->can.can_stats.bus_off++;
		can_bus_off(ndev);
	}

	errc = ioread32(&priv->regs->errc);

	if (status & PCH_EWARN) {
		state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		cf->can_id |= CAN_ERR_CRTL;
		if (((errc & PCH_REC) >> 8) > 96)
			cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		if ((errc & PCH_TEC) > 96)
			cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
		netdev_dbg(ndev,
			   "%s -> Error Counter is more than 96.\n", __func__);
	}

	if (status & PCH_EPASSIV) {
		priv->can.can_stats.error_passive++;
		state = CAN_STATE_ERROR_PASSIVE;
		cf->can_id |= CAN_ERR_CRTL;
		if (errc & PCH_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if ((errc & PCH_TEC) > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		netdev_dbg(ndev,
			   "%s -> CAN controller is ERROR PASSIVE.\n", __func__);
	}

	lec = status & PCH_LEC_ALL;
	switch (lec) {
	case PCH_STUF_ERR:
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_FORM_ERR:
		cf->data[2] |= CAN_ERR_PROT_FORM;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_ACK_ERR:
		cf->can_id |= CAN_ERR_ACK;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_BIT1_ERR:
	case PCH_BIT0_ERR:
		cf->data[2] |= CAN_ERR_PROT_BIT;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_CRC_ERR:
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_LEC_ALL: /* No new bus error to report. */
		break;
	}

	cf->data[6] = errc & PCH_TEC;
	cf->data[7] = (errc & PCH_REC) >> 8;

	priv->can.state = state;
	netif_receive_skb(skb);
}

static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct pch_can_priv *priv = netdev_priv(ndev);

	if (!pch_can_int_pending(priv))
		return IRQ_NONE;

	pch_can_set_int_enables(priv, PCH_CAN_NONE);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
{
	if (obj_id < PCH_FIFO_THRESH) {
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
			  PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);

		/* Clear the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clear the IntPnd bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_INTPND);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
	} else if (obj_id > PCH_FIFO_THRESH) {
		pch_can_int_clr(priv, obj_id);
	} else if (obj_id == PCH_FIFO_THRESH) {
		int cnt;

		/* Clear the pending interrupts of the first FIFO half. */
		for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
			pch_can_int_clr(priv, cnt + 1);
	}
}

static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
	pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
			  PCH_IF_MCONT_MSGLOST);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	stats->rx_over_errors++;
	stats->rx_errors++;

	netif_receive_skb(skb);
}

static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
{
	u32 reg;
	canid_t id;
	int rcv_pkts = 0;
	struct sk_buff *skb;
	struct can_frame *cf;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	int i;
	u32 id2;
	u16 data_reg;

	do {
		/* Read the message object into the IF1 registers. */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);

		/* Check the message control bits. */
		reg = ioread32(&priv->regs->ifregs[0].mcont);

		if (reg & PCH_IF_MCONT_EOB)
			break;

		/* A set MsgLost bit means this object was overwritten. */
		if (reg & PCH_IF_MCONT_MSGLOST) {
			pch_can_rx_msg_lost(ndev, obj_num);
			rcv_pkts++;
			quota--;
			obj_num++;
			continue;
		} else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
			obj_num++;
			continue;
		}

		skb = alloc_can_skb(priv->ndev, &cf);
		if (!skb) {
			netdev_err(ndev, "alloc_can_skb() failed\n");
			return rcv_pkts;
		}

		/* Extract the CAN ID: extended or standard frame. */
		id2 = ioread32(&priv->regs->ifregs[0].id2);
		if (id2 & PCH_ID2_XTD) {
			id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
			id |= (((id2) & 0x1fff) << 16);
			cf->can_id = id | CAN_EFF_FLAG;
		} else {
			id = (id2 >> 2) & CAN_SFF_MASK;
			cf->can_id = id;
		}

		cf->len = can_cc_dlc2len(ioread32(&priv->regs->ifregs[0].mcont) & 0xF);

		if (id2 & PCH_ID2_DIR) {
			cf->can_id |= CAN_RTR_FLAG;
		} else {
			for (i = 0; i < cf->len; i += 2) {
				data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
				cf->data[i] = data_reg;
				cf->data[i + 1] = data_reg >> 8;
			}

			stats->rx_bytes += cf->len;
		}
		stats->rx_packets++;
		rcv_pkts++;
		quota--;
		netif_receive_skb(skb);

		pch_fifo_thresh(priv, obj_num);
		obj_num++;
	} while (quota > 0);

	return rcv_pkts;
}

static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);

	stats->tx_bytes += can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1,
					    NULL);
	stats->tx_packets++;
	iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
		  &priv->regs->ifregs[1].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
	if (int_stat == PCH_TX_OBJ_END)
		netif_wake_queue(ndev);
}

static int pch_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct pch_can_priv *priv = netdev_priv(ndev);
	u32 int_stat;
	u32 reg_stat;
	int quota_save = quota;

	int_stat = pch_can_int_pending(priv);
	if (!int_stat)
		goto end;

	if (int_stat == PCH_STATUS_INT) {
		reg_stat = ioread32(&priv->regs->stat);

		if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
		    ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
			pch_can_error(ndev, reg_stat);
			quota--;
		}

		if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
			pch_can_bit_clear(&priv->regs->stat,
					  reg_stat & (PCH_TX_OK | PCH_RX_OK));

		int_stat = pch_can_int_pending(priv);
	}

	if (quota == 0)
		goto end;

	if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
		quota -= pch_can_rx_normal(ndev, int_stat, quota);
	} else if ((int_stat >= PCH_TX_OBJ_START) &&
		   (int_stat <= PCH_TX_OBJ_END)) {
		/* Handle transmission interrupt. */
		pch_can_tx_complete(ndev, int_stat);
	}

end:
	napi_complete(napi);
	pch_can_set_int_enables(priv, PCH_CAN_ALL);

	return quota_save - quota;
}

static int pch_set_bittiming(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 canbit;
	u32 bepe;

	/* Set the CCE bit to allow writing the bit timing registers. */
	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);

	canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
	canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
	canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
	canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
	bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
	iowrite32(canbit, &priv->regs->bitt);
	iowrite32(bepe, &priv->regs->brpe);
	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);

	return 0;
}

static void pch_can_start(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	if (priv->can.state != CAN_STATE_STOPPED)
		pch_can_reset(priv);

	pch_set_bittiming(ndev);
	pch_can_set_optmode(priv);

	pch_can_set_tx_all(priv, 1);
	pch_can_set_rx_all(priv, 1);

	/* Switch the controller to run mode. */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		pch_can_start(ndev);
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int pch_can_open(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	int retval;

	/* Register the interrupt handler. */
	retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
			     ndev->name, ndev);
	if (retval) {
		netdev_err(ndev, "request_irq failed.\n");
		goto req_irq_err;
	}

	/* Open the CAN device. */
	retval = open_candev(ndev);
	if (retval) {
		netdev_err(ndev, "open_candev() failed %d\n", retval);
		goto err_open_candev;
	}

	pch_can_init(priv);
	pch_can_start(ndev);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_open_candev:
	free_irq(priv->dev->irq, ndev);
req_irq_err:
	pch_can_release(priv);

	return retval;
}

static int pch_close(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	pch_can_release(priv);
	free_irq(priv->dev->irq, ndev);
	close_candev(ndev);
	priv->can.state = CAN_STATE_STOPPED;
	return 0;
}

static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	int tx_obj_no;
	int i;
	u32 id2;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	tx_obj_no = priv->tx_obj;
	if (priv->tx_obj == PCH_TX_OBJ_END) {
		if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
			netif_stop_queue(ndev);

		priv->tx_obj = PCH_TX_OBJ_START;
	} else {
		priv->tx_obj++;
	}

	/* Select all update bits of the IF2 registers. */
	pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);

	/* Set the ID: extended or standard frame. */
	if (cf->can_id & CAN_EFF_FLAG) {
		iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
		id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
	} else {
		iowrite32(0, &priv->regs->ifregs[1].id1);
		id2 = (cf->can_id & CAN_SFF_MASK) << 2;
	}

	id2 |= PCH_ID_MSGVAL;

	/* The Dir bit is set only for data frames, not for RTR frames. */
	if (!(cf->can_id & CAN_RTR_FLAG))
		id2 |= PCH_ID2_DIR;

	iowrite32(id2, &priv->regs->ifregs[1].id2);

	/* Copy the frame payload into the data registers. */
	for (i = 0; i < cf->len; i += 2) {
		iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
			  &priv->regs->ifregs[1].data[i / 2]);
	}

	can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);

	/* Set the DLC and request the transmission. */
	iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
		  PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);

	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);

	return NETDEV_TX_OK;
}

static const struct net_device_ops pch_can_netdev_ops = {
	.ndo_open = pch_can_open,
	.ndo_stop = pch_close,
	.ndo_start_xmit = pch_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static void pch_can_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(ndev);

	unregister_candev(priv->ndev);
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pch_can_reset(priv);
	pci_iounmap(pdev, priv->regs);
	free_candev(priv->ndev);
}

static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
{
	/* Clear the IE, SIE and EIE bits of the control register. */
	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);

	/* Restore the saved interrupt enable bits. */
	pch_can_bit_set(&priv->regs->cont,
			((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
}

static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
{
	/* Return the current state of the IE, SIE and EIE bits. */
	return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
}

static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
					      u32 buff_num, enum pch_ifreg dir)
{
	u32 ie, enable;

	/* Mirror pch_can_set_rxtx(): TXIE for the TX interface, RXIE for RX. */
	if (dir)
		ie = PCH_IF_MCONT_TXIE;
	else
		ie = PCH_IF_MCONT_RXIE;

	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);

	if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
	    ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
		enable = 1;
	else
		enable = 0;

	return enable;
}

static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
						      u32 buffer_num, int set)
{
	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	if (set)
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_EOB);
	else
		pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);

	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}

static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
						     u32 buffer_num)
{
	u32 link;

	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);

	if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
		link = 0;
	else
		link = 1;
	return link;
}

static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
{
	return (ioread32(&priv->regs->treq1) & 0xffff) |
	       (ioread32(&priv->regs->treq2) << 16);
}

static int __maybe_unused pch_can_suspend(struct device *dev_d)
{
	int i;
	u32 buf_stat;
	int counter = PCH_COUNTER_LIMIT;

	struct net_device *dev = dev_get_drvdata(dev_d);
	struct pch_can_priv *priv = netdev_priv(dev);

	/* Stop the CAN controller. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Mark the device as stopped. */
	priv->can.state = CAN_STATE_STOPPED;

	/* Wait for any pending transmissions to complete. */
	while (counter) {
		buf_stat = pch_can_get_buffer_status(priv);
		if (!buf_stat)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		dev_err(dev_d, "%s -> Transmission timed out.\n", __func__);

	/* Save the interrupt configuration and then disable interrupts. */
	priv->int_enables = pch_can_get_int_enables(priv);
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Save the transmit buffer enable state. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_TX_IFREG);

	/* Disable all transmit buffers. */
	pch_can_set_tx_all(priv, 0);

	/* Save the receive buffer enable state and FIFO link. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_RX_IFREG);
		priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
	}

	/* Disable all receive buffers. */
	pch_can_set_rx_all(priv, 0);

	return 0;
}

static int __maybe_unused pch_can_resume(struct device *dev_d)
{
	int i;
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct pch_can_priv *priv = netdev_priv(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Disable all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Keep the controller stopped while it is reconfigured. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Reconfigure the receive and transmit message objects. */
	pch_can_config_rx_tx_buffers(priv);

	/* Restore the bit timing. */
	pch_set_bittiming(dev);

	/* Restore listen-only/loopback mode. */
	pch_can_set_optmode(priv);

	/* Restore the transmit buffer enable state. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);

	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Restore the receive FIFO link. */
		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);

		/* Restore the receive buffer enable state. */
		pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
	}

	/* Restore the saved interrupt enables. */
	pch_can_set_int_custom(priv);

	/* Switch the controller back to run mode. */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	return 0;
}

static int pch_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct pch_can_priv *priv = netdev_priv(dev);
	u32 errc = ioread32(&priv->regs->errc);

	bec->txerr = errc & PCH_TEC;
	bec->rxerr = (errc & PCH_REC) >> 8;

	return 0;
}

static int pch_can_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct net_device *ndev;
	struct pch_can_priv *priv;
	int rc;
	void __iomem *addr;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
		goto probe_exit_endev;
	}

	rc = pci_request_regions(pdev, KBUILD_MODNAME);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
		goto probe_exit_pcireq;
	}

	addr = pci_iomap(pdev, 1, 0);
	if (!addr) {
		rc = -EIO;
		dev_err(&pdev->dev, "Failed pci_iomap\n");
		goto probe_exit_ipmap;
	}

	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
	if (!ndev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "Failed alloc_candev\n");
		goto probe_exit_alloc_candev;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->regs = addr;
	priv->dev = pdev;
	priv->can.bittiming_const = &pch_can_bittiming_const;
	priv->can.do_set_mode = pch_can_do_set_mode;
	priv->can.do_get_berr_counter = pch_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_LOOPBACK;
	priv->tx_obj = PCH_TX_OBJ_START;

	ndev->irq = pdev->irq;
	ndev->flags |= IFF_ECHO;

	pci_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &pch_can_netdev_ops;
	priv->can.clock.freq = PCH_CAN_CLK;

	netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);

	rc = pci_enable_msi(priv->dev);
	if (rc) {
		netdev_err(ndev, "PCH CAN opened without MSI\n");
		priv->use_msi = 0;
	} else {
		netdev_err(ndev, "PCH CAN opened with MSI\n");
		pci_set_master(pdev);
		priv->use_msi = 1;
	}

	rc = register_candev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
		goto probe_exit_reg_candev;
	}

	return 0;

probe_exit_reg_candev:
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	free_candev(ndev);
probe_exit_alloc_candev:
	pci_iounmap(pdev, addr);
probe_exit_ipmap:
	pci_release_regions(pdev);
probe_exit_pcireq:
	pci_disable_device(pdev);
probe_exit_endev:
	return rc;
}

static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
			 pch_can_suspend,
			 pch_can_resume);

static struct pci_driver pch_can_pci_driver = {
	.name = "pch_can",
	.id_table = pch_pci_tbl,
	.probe = pch_can_probe,
	.remove = pch_can_remove,
	.driver.pm = &pch_can_pm_ops,
};

module_pci_driver(pch_can_pci_driver);

MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.94");