// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
 */

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>

#define PCH_CTRL_INIT		BIT(0) /* The INIT bit of CANCONT register. */
#define PCH_CTRL_IE		BIT(1) /* The IE bit of CAN control register */
#define PCH_CTRL_IE_SIE_EIE	(BIT(3) | BIT(2) | BIT(1))
#define PCH_CTRL_CCE		BIT(6)
#define PCH_CTRL_OPT		BIT(7) /* The OPT bit of CANCONT register. */
#define PCH_OPT_SILENT		BIT(3) /* The Silent bit of CANOPT reg. */
#define PCH_OPT_LBACK		BIT(4) /* The LoopBack bit of CANOPT reg. */

#define PCH_CMASK_RX_TX_SET	0x00f3
#define PCH_CMASK_RX_TX_GET	0x0073
#define PCH_CMASK_ALL		0xff
#define PCH_CMASK_NEWDAT	BIT(2)
#define PCH_CMASK_CLRINTPND	BIT(3)
#define PCH_CMASK_CTRL		BIT(4)
#define PCH_CMASK_ARB		BIT(5)
#define PCH_CMASK_MASK		BIT(6)
#define PCH_CMASK_RDWR		BIT(7)
#define PCH_IF_MCONT_NEWDAT	BIT(15)
#define PCH_IF_MCONT_MSGLOST	BIT(14)
#define PCH_IF_MCONT_INTPND	BIT(13)
#define PCH_IF_MCONT_UMASK	BIT(12)
#define PCH_IF_MCONT_TXIE	BIT(11)
#define PCH_IF_MCONT_RXIE	BIT(10)
#define PCH_IF_MCONT_RMTEN	BIT(9)
#define PCH_IF_MCONT_TXRQXT	BIT(8)
#define PCH_IF_MCONT_EOB	BIT(7)
#define PCH_IF_MCONT_DLC	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
#define PCH_MASK2_MDIR_MXTD	(BIT(14) | BIT(15))
#define PCH_ID2_DIR		BIT(13)
#define PCH_ID2_XTD		BIT(14)
#define PCH_ID_MSGVAL		BIT(15)
#define PCH_IF_CREQ_BUSY	BIT(15)

#define PCH_STATUS_INT		0x8000
#define PCH_RP			0x00008000
#define PCH_REC			0x00007f00
#define PCH_TEC			0x000000ff

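/*
 * Bus status flags as reported in the status register (stat); these are
 * checked against the register value in pch_can_poll() and pch_can_error().
 */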
#define PCH_TX_OK		BIT(3)
#define PCH_RX_OK		BIT(4)
#define PCH_EPASSIV		BIT(5)
#define PCH_EWARN		BIT(6)
#define PCH_BUS_OFF		BIT(7)

/* Bit positions of certain controller bits. */
#define PCH_BIT_BRP_SHIFT	0
#define PCH_BIT_SJW_SHIFT	6
#define PCH_BIT_TSEG1_SHIFT	8
#define PCH_BIT_TSEG2_SHIFT	12
#define PCH_BIT_BRPE_BRPE_SHIFT	6

#define PCH_MSK_BITT_BRP	0x3f
#define PCH_MSK_BRPE_BRPE	0x3c0
#define PCH_MSK_CTRL_IE_SIE_EIE	0x07
#define PCH_COUNTER_LIMIT	10

#define PCH_CAN_CLK		50000000 /* 50 MHz */

/*
 * Define the number of message objects.
 * PCH CAN communication is done via Message RAM.
 * The Message RAM consists of 32 message objects.
 */
#define PCH_RX_OBJ_NUM		26
#define PCH_TX_OBJ_NUM		6
#define PCH_RX_OBJ_START	1
#define PCH_RX_OBJ_END		PCH_RX_OBJ_NUM
#define PCH_TX_OBJ_START	(PCH_RX_OBJ_END + 1)
#define PCH_TX_OBJ_END		(PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
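/*
 * With the values above, message objects 1..26 are used for reception and
 * objects 27..32 for transmission.
 */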

#define PCH_FIFO_THRESH		16

/* TxRqst2 shows the status of MsgObjNo. 17~32 */
#define PCH_TREQ2_TX_MASK	(((1 << PCH_TX_OBJ_NUM) - 1) << \
				 (PCH_RX_OBJ_END - 16))
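/*
 * For reference: with the object split above this expands to
 * 0x3f << 10 == 0xfc00, i.e. the TREQ2 bits that correspond to the six
 * Tx objects (27..32).
 */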

enum pch_ifreg {
	PCH_RX_IFREG,
	PCH_TX_IFREG,
};
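/*
 * Note: throughout this driver the IF1 register set (ifregs[0]) is used for
 * the receive path and IF2 (ifregs[1]) for the transmit path, matching the
 * enum values above.
 */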

enum pch_can_err {
	PCH_STUF_ERR = 1,
	PCH_FORM_ERR,
	PCH_ACK_ERR,
	PCH_BIT1_ERR,
	PCH_BIT0_ERR,
	PCH_CRC_ERR,
	PCH_LEC_ALL,
};

enum pch_can_mode {
	PCH_CAN_ENABLE,
	PCH_CAN_DISABLE,
	PCH_CAN_ALL,
	PCH_CAN_NONE,
	PCH_CAN_STOP,
	PCH_CAN_RUN,
};

struct pch_can_if_regs {
	u32 creq;
	u32 cmask;
	u32 mask1;
	u32 mask2;
	u32 id1;
	u32 id2;
	u32 mcont;
	u32 data[4];
	u32 rsv[13];
};

struct pch_can_regs {
	u32 cont;
	u32 stat;
	u32 errc;
	u32 bitt;
	u32 intr;
	u32 opt;
	u32 brpe;
	u32 reserve;
	struct pch_can_if_regs ifregs[2]; /* [0]=if1  [1]=if2 */
	u32 reserve1[8];
	u32 treq1;
	u32 treq2;
	u32 reserve2[6];
	u32 data1;
	u32 data2;
	u32 reserve3[6];
	u32 canipend1;
	u32 canipend2;
	u32 reserve4[6];
	u32 canmval1;
	u32 canmval2;
	u32 reserve5[37];
	u32 srst;
};

struct pch_can_priv {
	struct can_priv can;
	struct pci_dev *dev;
	u32 tx_enable[PCH_TX_OBJ_END];
	u32 rx_enable[PCH_TX_OBJ_END];
	u32 rx_link[PCH_TX_OBJ_END];
	u32 int_enables;
	struct net_device *ndev;
	struct pch_can_regs __iomem *regs;
	struct napi_struct napi;
	int tx_obj;	/* Points to the next Tx object index */
	int use_msi;
};
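/*
 * tx_enable[], rx_enable[] and rx_link[] are indexed by message object
 * number minus one; only the Rx (1..26) respectively Tx (27..32) slots are
 * actually used, see pch_can_suspend()/pch_can_resume().
 */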

static const struct can_bittiming_const pch_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024, /* 6bit + extended 4bit */
	.brp_inc = 1,
};

static const struct pci_device_id pch_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, pch_pci_tbl);

static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
{
	iowrite32(ioread32(addr) | mask, addr);
}

static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
{
	iowrite32(ioread32(addr) & ~mask, addr);
}

static void pch_can_set_run_mode(struct pch_can_priv *priv,
				 enum pch_can_mode mode)
{
	switch (mode) {
	case PCH_CAN_RUN:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
		break;

	case PCH_CAN_STOP:
		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
		break;

	default:
		netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
		break;
	}
}

static void pch_can_set_optmode(struct pch_can_priv *priv)
{
	u32 reg_val = ioread32(&priv->regs->opt);

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		reg_val |= PCH_OPT_SILENT;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_val |= PCH_OPT_LBACK;

	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
	iowrite32(reg_val, &priv->regs->opt);
}

static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
{
	int counter = PCH_COUNTER_LIMIT;
	u32 ifx_creq;

	iowrite32(num, creq_addr);
	while (counter) {
		ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
		if (!ifx_creq)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
}

static void pch_can_set_int_enables(struct pch_can_priv *priv,
				    enum pch_can_mode interrupt_no)
{
	switch (interrupt_no) {
	case PCH_CAN_DISABLE:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
		break;

	case PCH_CAN_ALL:
		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
		break;

	case PCH_CAN_NONE:
		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
		break;

	default:
		netdev_err(priv->ndev, "Invalid interrupt number.\n");
		break;
	}
}

static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
			     int set, enum pch_ifreg dir)
{
	u32 ie;

	if (dir)
		ie = PCH_IF_MCONT_TXIE;
	else
		ie = PCH_IF_MCONT_RXIE;

	/* Read the message buffer from Message RAM into the IF1/IF2 registers. */
	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);

	/* Set CMASK for writing back the MsgVal and RxIE/TxIE bits. */
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[dir].cmask);

	if (set) {
		/* Setting the MsgVal and RxIE/TxIE bits */
		pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
		pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
	} else {
		/* Clearing the MsgVal and RxIE/TxIE bits */
		pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
		pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
	}

	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
}

static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
{
	int i;

	/* Traverse the objects configured as receive objects. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
}

static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
{
	int i;

	/* Traverse the objects configured as transmit objects. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
}

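/*
 * Note: the interrupt register reads back PCH_STATUS_INT (0x8000) for a
 * status interrupt and otherwise the number of the pending message object;
 * pch_can_poll() relies on this encoding.
 */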
static u32 pch_can_int_pending(struct pch_can_priv *priv)
{
	return ioread32(&priv->regs->intr) & 0xffff;
}

static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
{
	int i; /* Msg Obj ID (1~32) */

	for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
		iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
		iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);
		iowrite32(0x0, &priv->regs->ifregs[0].mcont);
		iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
		iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
			  &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}
}

static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
	int i;

	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);

		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);

		pch_can_bit_set(&priv->regs->ifregs[0].mcont,
				PCH_IF_MCONT_UMASK);

		/* In FIFO mode, the EoB bit of the last Rx object must be 1. */
		if (i == PCH_RX_OBJ_END)
			pch_can_bit_set(&priv->regs->ifregs[0].mcont,
					PCH_IF_MCONT_EOB);
		else
			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
					  PCH_IF_MCONT_EOB);

		iowrite32(0, &priv->regs->ifregs[0].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
				  0x1fff | PCH_MASK2_MDIR_MXTD);

		/* Setting CMASK for writing */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}

	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);

		/* Setting the DIR bit for transmission */
		iowrite32(0x0, &priv->regs->ifregs[1].id1);
		iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);

		/* Setting EOB bit for transmitter */
		iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
			  &priv->regs->ifregs[1].mcont);

		iowrite32(0, &priv->regs->ifregs[1].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);

		/* Setting CMASK for writing */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
	}
}

static void pch_can_init(struct pch_can_priv *priv)
{
	/* Stopping the CAN device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Clearing all the message object buffers. */
	pch_can_clear_if_buffers(priv);

	/* Configuring each message object as either an rx or a tx object. */
	pch_can_config_rx_tx_buffers(priv);

	/* Enabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_ALL);
}

static void pch_can_release(struct pch_can_priv *priv)
{
	/* Stopping the CAN device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Disabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_NONE);

	/* Disabling all the receive objects. */
	pch_can_set_rx_all(priv, 0);

	/* Disabling all the transmit objects. */
	pch_can_set_tx_all(priv, 0);
}

/* This function clears interrupt(s) from the CAN device. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
	/* Clear the interrupt for a receive object. */
	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
		/* Setting CMASK for clearing the reception interrupts. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing NewDat & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
		/*
		 * Setting CMASK for clearing interrupts for frame transmission.
		 */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[1].cmask);

		/* Resetting the ID registers. */
		pch_can_bit_set(&priv->regs->ifregs[1].id2,
				PCH_ID2_DIR | (0x7ff << 2));
		iowrite32(0x0, &priv->regs->ifregs[1].id1);

		/* Clearing NewDat, TxRqst & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
				  PCH_IF_MCONT_TXRQXT);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
	}
}

static void pch_can_reset(struct pch_can_priv *priv)
{
	/* write to sw reset register */
	iowrite32(1, &priv->regs->srst);
	iowrite32(0, &priv->regs->srst);
}

static void pch_can_error(struct net_device *ndev, u32 status)
{
	struct sk_buff *skb;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	u32 errc, lec;
	struct net_device_stats *stats = &(priv->ndev->stats);
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	errc = ioread32(&priv->regs->errc);
	if (status & PCH_BUS_OFF) {
		pch_can_set_tx_all(priv, 0);
		pch_can_set_rx_all(priv, 0);
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		priv->can.can_stats.bus_off++;
		can_bus_off(ndev);
	} else {
		cf->data[6] = errc & PCH_TEC;
		cf->data[7] = (errc & PCH_REC) >> 8;
	}

	/* Warning interrupt. */
	if (status & PCH_EWARN) {
		state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		cf->can_id |= CAN_ERR_CRTL;
		if (((errc & PCH_REC) >> 8) > 96)
			cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		if ((errc & PCH_TEC) > 96)
			cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
		netdev_dbg(ndev,
			   "%s -> Error Counter is more than 96.\n", __func__);
	}
	/* Error passive interrupt. */
	if (status & PCH_EPASSIV) {
		priv->can.can_stats.error_passive++;
		state = CAN_STATE_ERROR_PASSIVE;
		cf->can_id |= CAN_ERR_CRTL;
		if (errc & PCH_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if ((errc & PCH_TEC) > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		netdev_dbg(ndev,
			   "%s -> CAN controller is ERROR PASSIVE.\n", __func__);
	}

	lec = status & PCH_LEC_ALL;
	switch (lec) {
	case PCH_STUF_ERR:
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_FORM_ERR:
		cf->data[2] |= CAN_ERR_PROT_FORM;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_ACK_ERR:
		cf->can_id |= CAN_ERR_ACK;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_BIT1_ERR:
	case PCH_BIT0_ERR:
		cf->data[2] |= CAN_ERR_PROT_BIT;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_CRC_ERR:
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_LEC_ALL: /* Written by CPU. No error status */
		break;
	}

	priv->can.state = state;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct pch_can_priv *priv = netdev_priv(ndev);

	if (!pch_can_int_pending(priv))
		return IRQ_NONE;

	pch_can_set_int_enables(priv, PCH_CAN_NONE);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

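/*
 * Receive interrupt housekeeping: for objects below PCH_FIFO_THRESH only
 * IntPnd is cleared here, the object at the threshold triggers a batch
 * clear of objects 1..PCH_FIFO_THRESH, and objects above the threshold are
 * cleared individually via pch_can_int_clr().
 */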
static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
{
	if (obj_id < PCH_FIFO_THRESH) {
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
			  PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_INTPND);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
	} else if (obj_id > PCH_FIFO_THRESH) {
		pch_can_int_clr(priv, obj_id);
	} else if (obj_id == PCH_FIFO_THRESH) {
		int cnt;

		for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
			pch_can_int_clr(priv, cnt + 1);
	}
}

static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
	pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
			  PCH_IF_MCONT_MSGLOST);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	stats->rx_over_errors++;
	stats->rx_errors++;

	netif_receive_skb(skb);
}

static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
{
	u32 reg;
	canid_t id;
	int rcv_pkts = 0;
	struct sk_buff *skb;
	struct can_frame *cf;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	int i;
	u32 id2;
	u16 data_reg;

	do {
		/* Reading the message object from the Message RAM */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);

		/* Reading the MCONT register. */
		reg = ioread32(&priv->regs->ifregs[0].mcont);

		if (reg & PCH_IF_MCONT_EOB)
			break;

		/* If the MsgLost bit is set. */
		if (reg & PCH_IF_MCONT_MSGLOST) {
			pch_can_rx_msg_lost(ndev, obj_num);
			rcv_pkts++;
			quota--;
			obj_num++;
			continue;
		} else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
			obj_num++;
			continue;
		}

		skb = alloc_can_skb(priv->ndev, &cf);
		if (!skb) {
			netdev_err(ndev, "alloc_can_skb Failed\n");
			return rcv_pkts;
		}

		/* Get the received data. */
		id2 = ioread32(&priv->regs->ifregs[0].id2);
		if (id2 & PCH_ID2_XTD) {
			id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
			id |= (((id2) & 0x1fff) << 16);
			cf->can_id = id | CAN_EFF_FLAG;
		} else {
			id = (id2 >> 2) & CAN_SFF_MASK;
			cf->can_id = id;
		}

		if (id2 & PCH_ID2_DIR)
			cf->can_id |= CAN_RTR_FLAG;

		cf->can_dlc =
			get_can_dlc(ioread32(&priv->regs->ifregs[0].mcont) & 0xF);

		for (i = 0; i < cf->can_dlc; i += 2) {
			data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
			cf->data[i] = data_reg;
			cf->data[i + 1] = data_reg >> 8;
		}

		rcv_pkts++;
		stats->rx_packets++;
		quota--;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);

		pch_fifo_thresh(priv, obj_num);
		obj_num++;
	} while (quota > 0);

	return rcv_pkts;
}

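/*
 * Tx completion: the interrupting message object number (27..32) maps to
 * echo slot int_stat - PCH_RX_OBJ_END - 1 (0..5), the same index used by
 * can_put_echo_skb() in pch_xmit().
 */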
static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	u32 dlc;

	can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
	iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
		  &priv->regs->ifregs[1].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
	dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
			  PCH_IF_MCONT_DLC);
	stats->tx_bytes += dlc;
	stats->tx_packets++;
	if (int_stat == PCH_TX_OBJ_END)
		netif_wake_queue(ndev);
}

static int pch_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct pch_can_priv *priv = netdev_priv(ndev);
	u32 int_stat;
	u32 reg_stat;
	int quota_save = quota;

	int_stat = pch_can_int_pending(priv);
	if (!int_stat)
		goto end;

	if (int_stat == PCH_STATUS_INT) {
		reg_stat = ioread32(&priv->regs->stat);

		if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
		    ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
			pch_can_error(ndev, reg_stat);
			quota--;
		}

		if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
			pch_can_bit_clear(&priv->regs->stat,
					  reg_stat & (PCH_TX_OK | PCH_RX_OK));

		int_stat = pch_can_int_pending(priv);
	}

	if (quota == 0)
		goto end;

	if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
		quota -= pch_can_rx_normal(ndev, int_stat, quota);
	} else if ((int_stat >= PCH_TX_OBJ_START) &&
		   (int_stat <= PCH_TX_OBJ_END)) {
		/* Handle transmission interrupt */
		pch_can_tx_complete(ndev, int_stat);
	}

end:
	napi_complete(napi);
	pch_can_set_int_enables(priv, PCH_CAN_ALL);

	return quota_save - quota;
}

static int pch_set_bittiming(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 canbit;
	u32 bepe;

	/* Setting the CCE bit for accessing the CAN bit timing register. */
	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);

	canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
	canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
	canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
	canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
	bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
	iowrite32(canbit, &priv->regs->bitt);
	iowrite32(bepe, &priv->regs->brpe);
	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);

	return 0;
}
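/*
 * Illustrative example for the conversion above (one possible setting, not
 * taken from a datasheet): with the 50 MHz CAN clock, brp = 10, sjw = 1,
 * prop_seg + phase_seg1 = 7 and phase_seg2 = 2 give a 500 kbit/s bit rate
 * (10 time quanta per bit, 80% sample point), so bitt = 0x1609 and brpe = 0
 * are written.
 */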

static void pch_can_start(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	if (priv->can.state != CAN_STATE_STOPPED)
		pch_can_reset(priv);

	pch_set_bittiming(ndev);
	pch_can_set_optmode(priv);

	pch_can_set_tx_all(priv, 1);
	pch_can_set_rx_all(priv, 1);

	/* Setting the CAN to run mode. */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		pch_can_start(ndev);
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int pch_can_open(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	int retval;

	/* Registering the interrupt. */
	retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
			     ndev->name, ndev);
	if (retval) {
		netdev_err(ndev, "request_irq failed.\n");
		goto req_irq_err;
	}

	/* Open the common CAN device. */
	retval = open_candev(ndev);
	if (retval) {
		netdev_err(ndev, "open_candev() failed %d\n", retval);
		goto err_open_candev;
	}

	pch_can_init(priv);
	pch_can_start(ndev);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_open_candev:
	free_irq(priv->dev->irq, ndev);
req_irq_err:
	pch_can_release(priv);

	return retval;
}

static int pch_close(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	pch_can_release(priv);
	free_irq(priv->dev->irq, ndev);
	close_candev(ndev);
	priv->can.state = CAN_STATE_STOPPED;
	return 0;
}

static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	int tx_obj_no;
	int i;
	u32 id2;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	tx_obj_no = priv->tx_obj;
	if (priv->tx_obj == PCH_TX_OBJ_END) {
		if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
			netif_stop_queue(ndev);

		priv->tx_obj = PCH_TX_OBJ_START;
	} else {
		priv->tx_obj++;
	}

	/* Setting the CMASK register. */
	pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);

	/* If an extended ID frame is being transmitted. */
	if (cf->can_id & CAN_EFF_FLAG) {
		iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
		id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
	} else {
		iowrite32(0, &priv->regs->ifregs[1].id1);
		id2 = (cf->can_id & CAN_SFF_MASK) << 2;
	}

	id2 |= PCH_ID_MSGVAL;

	/* Set the DIR bit (transmit) unless a remote frame is requested. */
	if (!(cf->can_id & CAN_RTR_FLAG))
		id2 |= PCH_ID2_DIR;

	iowrite32(id2, &priv->regs->ifregs[1].id2);

	/* Copy data to register */
	for (i = 0; i < cf->can_dlc; i += 2) {
		iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
			  &priv->regs->ifregs[1].data[i / 2]);
	}

	can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);

	/* Set the size of the data. Update if2_mcont */
	iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
		  PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);

	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);

	return NETDEV_TX_OK;
}

static const struct net_device_ops pch_can_netdev_ops = {
	.ndo_open = pch_can_open,
	.ndo_stop = pch_close,
	.ndo_start_xmit = pch_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static void pch_can_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(ndev);

	unregister_candev(priv->ndev);
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pch_can_reset(priv);
	pci_iounmap(pdev, priv->regs);
	free_candev(priv->ndev);
}

static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
{
	/* Clearing the IE, SIE and EIE bits of the CAN control register. */
	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);

	/* Appropriately setting them. */
	pch_can_bit_set(&priv->regs->cont,
			((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
}

/* This function retrieves the interrupts enabled for the CAN device. */
static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
{
	/* Obtaining the status of the IE, SIE and EIE interrupt bits. */
	return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
}

static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
					      u32 buff_num, enum pch_ifreg dir)
{
	u32 ie, enable;

	/* Use the same dir -> IE-bit mapping as pch_can_set_rxtx(). */
	if (dir)
		ie = PCH_IF_MCONT_TXIE;
	else
		ie = PCH_IF_MCONT_RXIE;

	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);

	if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
	    ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
		enable = 1;
	else
		enable = 0;

	return enable;
}

static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
						       u32 buffer_num, int set)
{
	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	if (set)
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_EOB);
	else
		pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);

	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}

static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
						     u32 buffer_num)
{
	u32 link;

	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);

	if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
		link = 0;
	else
		link = 1;
	return link;
}

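/*
 * pch_can_get_buffer_status() returns a 32-bit bitmap of pending
 * transmission requests: TREQ2 covers message objects 17..32 (see the
 * PCH_TREQ2_TX_MASK comment) in bits 16..31 and TREQ1 correspondingly
 * covers objects 1..16 in bits 0..15. pch_can_suspend() polls it until it
 * reads zero.
 */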
static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
{
	return (ioread32(&priv->regs->treq1) & 0xffff) |
	       (ioread32(&priv->regs->treq2) << 16);
}

static int __maybe_unused pch_can_suspend(struct device *dev_d)
{
	int i;
	u32 buf_stat;	/* Variable for reading the transmit buffer status. */
	int counter = PCH_COUNTER_LIMIT;

	struct net_device *dev = dev_get_drvdata(dev_d);
	struct pch_can_priv *priv = netdev_priv(dev);

	/* Stop the CAN controller */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Indicate that we are about to enter / are in suspend. */
	priv->can.state = CAN_STATE_STOPPED;

	/* Waiting for all transmissions to complete. */
	while (counter) {
		buf_stat = pch_can_get_buffer_status(priv);
		if (!buf_stat)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		dev_err(dev_d, "%s -> Transmission time out.\n", __func__);

	/* Save interrupt configuration and then disable them */
	priv->int_enables = pch_can_get_int_enables(priv);
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Save Tx buffer enable state */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_TX_IFREG);

	/* Disable all Transmit buffers */
	pch_can_set_tx_all(priv, 0);

	/* Save Rx buffer enable state */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_RX_IFREG);
		priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
	}

	/* Disable all Receive buffers */
	pch_can_set_rx_all(priv, 0);

	return 0;
}

static int __maybe_unused pch_can_resume(struct device *dev_d)
{
	int i;
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct pch_can_priv *priv = netdev_priv(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Disabling all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Setting the CAN device in Stop Mode. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Configuring the transmit and receive buffers. */
	pch_can_config_rx_tx_buffers(priv);

	/* Restore the CAN state */
	pch_set_bittiming(dev);

	/* Listen/Active */
	pch_can_set_optmode(priv);

	/* Enabling the transmit buffers. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);

	/* Configuring the receive buffers and enabling them. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Restore buffer link */
		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);

		/* Restore buffer enables */
		pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
	}

	/* Enable CAN Interrupts */
	pch_can_set_int_custom(priv);

	/* Restore Run Mode */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	return 0;
}

static int pch_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct pch_can_priv *priv = netdev_priv(dev);
	u32 errc = ioread32(&priv->regs->errc);

	bec->txerr = errc & PCH_TEC;
	bec->rxerr = (errc & PCH_REC) >> 8;

	return 0;
}

static int pch_can_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct net_device *ndev;
	struct pch_can_priv *priv;
	int rc;
	void __iomem *addr;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
		goto probe_exit_endev;
	}

	rc = pci_request_regions(pdev, KBUILD_MODNAME);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
		goto probe_exit_pcireq;
	}

	addr = pci_iomap(pdev, 1, 0);
	if (!addr) {
		rc = -EIO;
		dev_err(&pdev->dev, "Failed pci_iomap\n");
		goto probe_exit_ipmap;
	}

	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
	if (!ndev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "Failed alloc_candev\n");
		goto probe_exit_alloc_candev;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->regs = addr;
	priv->dev = pdev;
	priv->can.bittiming_const = &pch_can_bittiming_const;
	priv->can.do_set_mode = pch_can_do_set_mode;
	priv->can.do_get_berr_counter = pch_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_LOOPBACK;
	priv->tx_obj = PCH_TX_OBJ_START; /* Points to the first Tx object */

	ndev->irq = pdev->irq;
	ndev->flags |= IFF_ECHO;

	pci_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &pch_can_netdev_ops;
	priv->can.clock.freq = PCH_CAN_CLK; /* Hz */

	netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);

	rc = pci_enable_msi(priv->dev);
	if (rc) {
		netdev_err(ndev, "PCH CAN opened without MSI\n");
		priv->use_msi = 0;
	} else {
		netdev_err(ndev, "PCH CAN opened with MSI\n");
		pci_set_master(pdev);
		priv->use_msi = 1;
	}

	rc = register_candev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
		goto probe_exit_reg_candev;
	}

	return 0;

probe_exit_reg_candev:
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	free_candev(ndev);
probe_exit_alloc_candev:
	pci_iounmap(pdev, addr);
probe_exit_ipmap:
	pci_release_regions(pdev);
probe_exit_pcireq:
	pci_disable_device(pdev);
probe_exit_endev:
	return rc;
}

static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
			 pch_can_suspend,
			 pch_can_resume);

static struct pci_driver pch_can_pci_driver = {
	.name = "pch_can",
	.id_table = pch_pci_tbl,
	.probe = pch_can_probe,
	.remove = pch_can_remove,
	.driver.pm = &pch_can_pm_ops,
};

module_pci_driver(pch_can_pci_driver);

MODULE_DESCRIPTION("Intel EG20T PCH CAN (Controller Area Network) Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.94");