1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020 Pengutronix,
6 // Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/pm_runtime.h>
23
24 #include <asm/unaligned.h>
25
26 #include "mcp251xfd.h"
27
28 #define DEVICE_NAME "mcp251xfd"
29
/* MCP2517FD: needs all CRC quirks, ECC, plus suppression of the
 * (presumably spurious — see quirk definition) MAB warnings.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
36
/* MCP2518FD: same CRC and ECC quirks as the MCP2517FD, but without
 * the MAB-no-warn workaround.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
42
/* Autodetect model, start with CRC enabled. The quirk set matches the
 * MCP2518FD until the actual model is determined.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
49
/* Nominal (arbitration phase) bit timing limits advertised to the CAN
 * core; values match the NBTCFG field widths used in
 * mcp251xfd_set_bittiming().
 */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
61
/* Data phase (CAN FD) bit timing limits; values match the DBTCFG field
 * widths used in mcp251xfd_set_bittiming().
 */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
73
__mcp251xfd_get_model_str(enum mcp251xfd_model model)74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
75 {
76 switch (model) {
77 case MCP251XFD_MODEL_MCP2517FD:
78 return "MCP2517FD";
79 case MCP251XFD_MODEL_MCP2518FD:
80 return "MCP2518FD";
81 case MCP251XFD_MODEL_MCP251XFD:
82 return "MCP251xFD";
83 }
84
85 return "<unknown>";
86 }
87
/* Return the human readable name of the controller model stored in
 * priv->devtype_data.
 */
static inline const char *
mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_get_model_str(priv->devtype_data.model);
}
93
mcp251xfd_get_mode_str(const u8 mode)94 static const char *mcp251xfd_get_mode_str(const u8 mode)
95 {
96 switch (mode) {
97 case MCP251XFD_REG_CON_MODE_MIXED:
98 return "Mixed (CAN FD/CAN 2.0)";
99 case MCP251XFD_REG_CON_MODE_SLEEP:
100 return "Sleep";
101 case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
102 return "Internal Loopback";
103 case MCP251XFD_REG_CON_MODE_LISTENONLY:
104 return "Listen Only";
105 case MCP251XFD_REG_CON_MODE_CONFIG:
106 return "Configuration";
107 case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
108 return "External Loopback";
109 case MCP251XFD_REG_CON_MODE_CAN2_0:
110 return "CAN 2.0";
111 case MCP251XFD_REG_CON_MODE_RESTRICTED:
112 return "Restricted Operation";
113 }
114
115 return "<unknown>";
116 }
117
mcp251xfd_vdd_enable(const struct mcp251xfd_priv * priv)118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
119 {
120 if (!priv->reg_vdd)
121 return 0;
122
123 return regulator_enable(priv->reg_vdd);
124 }
125
mcp251xfd_vdd_disable(const struct mcp251xfd_priv * priv)126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
127 {
128 if (!priv->reg_vdd)
129 return 0;
130
131 return regulator_disable(priv->reg_vdd);
132 }
133
134 static inline int
mcp251xfd_transceiver_enable(const struct mcp251xfd_priv * priv)135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 {
137 if (!priv->reg_xceiver)
138 return 0;
139
140 return regulator_enable(priv->reg_xceiver);
141 }
142
143 static inline int
mcp251xfd_transceiver_disable(const struct mcp251xfd_priv * priv)144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 {
146 if (!priv->reg_xceiver)
147 return 0;
148
149 return regulator_disable(priv->reg_xceiver);
150 }
151
mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv * priv)152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
153 {
154 int err;
155
156 err = clk_prepare_enable(priv->clk);
157 if (err)
158 return err;
159
160 err = mcp251xfd_vdd_enable(priv);
161 if (err)
162 clk_disable_unprepare(priv->clk);
163
164 /* Wait for oscillator stabilisation time after power up */
165 usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
166 2 * MCP251XFD_OSC_STAB_SLEEP_US);
167
168 return err;
169 }
170
mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv * priv)171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
172 {
173 int err;
174
175 err = mcp251xfd_vdd_disable(priv);
176 if (err)
177 return err;
178
179 clk_disable_unprepare(priv->clk);
180
181 return 0;
182 }
183
/* Prepare a SPI register-write buffer for the byte range of @reg
 * selected by @mask, optionally followed by a CRC if the chip requires
 * it (MCP251XFD_QUIRK_CRC_REG).
 *
 * Returns the total number of bytes (command + payload [+ CRC]) to
 * transfer over SPI.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Only transfer the bytes actually covered by mask: find the
	 * first and last byte of mask with any bit set.
	 */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	/* Build the write command for the byte-adjusted register
	 * address, then append the little endian payload, shifted so
	 * it lines up with first_byte.
	 */
	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		/* CRC covers command + payload and is stored big
		 * endian right behind them.
		 */
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
219
220 static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv * priv,u8 * tef_tail)221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
222 u8 *tef_tail)
223 {
224 u32 tef_ua;
225 int err;
226
227 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
228 if (err)
229 return err;
230
231 *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
232
233 return 0;
234 }
235
236 static inline int
mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv * priv,u8 * tx_tail)237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
238 u8 *tx_tail)
239 {
240 u32 fifo_sta;
241 int err;
242
243 err = regmap_read(priv->map_reg,
244 MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
245 &fifo_sta);
246 if (err)
247 return err;
248
249 *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
250
251 return 0;
252 }
253
254 static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv * priv,const struct mcp251xfd_rx_ring * ring,u8 * rx_head)255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
256 const struct mcp251xfd_rx_ring *ring,
257 u8 *rx_head)
258 {
259 u32 fifo_sta;
260 int err;
261
262 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
263 &fifo_sta);
264 if (err)
265 return err;
266
267 *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
268
269 return 0;
270 }
271
272 static inline int
mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv * priv,const struct mcp251xfd_rx_ring * ring,u8 * rx_tail)273 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
274 const struct mcp251xfd_rx_ring *ring,
275 u8 *rx_tail)
276 {
277 u32 fifo_ua;
278 int err;
279
280 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
281 &fifo_ua);
282 if (err)
283 return err;
284
285 fifo_ua -= ring->base - MCP251XFD_RAM_START;
286 *rx_tail = fifo_ua / ring->obj_size;
287
288 return 0;
289 }
290
/* Initialize one TX object: pre-build its SPI write command (with or
 * without CRC, depending on the chip quirks) and set up the two-part
 * SPI message: FIFO load followed by FIFO request-to-send.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	/* Deassert CS between the load and the RTS transfer with no
	 * extra delay.
	 */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
326
/* Reset all ring heads/tails and lay out the controller RAM:
 * TEF objects first, then the TX ring, then the RX rings back to back.
 * Also pre-builds the shared request-to-send command buffer and the
 * per-object SPI messages of the TX ring.
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	/* TEF */
	priv->tef.head = 0;
	priv->tef.tail = 0;

	/* TX: placed in RAM directly behind the TEF objects */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send: one shared buffer, referenced by the
	 * second SPI transfer of every TX object.
	 */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX: the first ring starts behind the TX objects, each
	 * further ring directly behind its predecessor.
	 */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;
	}
}
375
mcp251xfd_ring_free(struct mcp251xfd_priv * priv)376 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
377 {
378 int i;
379
380 for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
381 kfree(priv->rx[i]);
382 priv->rx[i] = NULL;
383 }
384 }
385
/* Size and allocate the rings: pick object sizes depending on CAN-FD
 * vs. Classic CAN mode, then fill the remaining controller RAM with as
 * many RX rings as fit. Returns 0 on success, -ENOMEM on allocation
 * failure (all previously allocated rings are freed).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* RAM not taken by the TEF and TX objects is available for RX
	 * rings. Each TX object implies one TEF object.
	 */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Round the object count down to a power of two,
		 * capped at 32.
		 */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
453
454 static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv * priv,u8 * mode)455 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
456 {
457 u32 val;
458 int err;
459
460 err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
461 if (err)
462 return err;
463
464 *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
465
466 return 0;
467 }
468
/* Request operation mode @mode_req via the REQOP field of the CON
 * register and, unless @nowait is set or Sleep Mode is requested, poll
 * OPMOD until the controller has actually entered the mode.
 *
 * Returns 0 on success, a negative error code on regmap failure or
 * poll timeout.
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	/* Don't poll when entering Sleep Mode — presumably the chip
	 * stops answering SPI once asleep; same for callers that
	 * explicitly asked not to wait.
	 */
	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		/* con holds the last value read by the poll loop. */
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
502
/* Set the operation mode and wait until the controller entered it. */
static inline int
mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
509
/* Request the operation mode without waiting for the transition. */
static inline int
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
			       const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
516
mcp251xfd_osc_invalid(u32 reg)517 static inline bool mcp251xfd_osc_invalid(u32 reg)
518 {
519 return reg == 0x0 || reg == 0xffffffff;
520 }
521
/* Power up the oscillator (clears the Oscillator Disable bit) and wait
 * until it reports ready. Returns -ENODEV if the OSC register reads
 * back as bus garbage (chip not present), -ETIMEDOUT if the oscillator
 * never became ready, or another negative regmap error.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	/* Check the polled value before the poll's error code: an
	 * all-zeros/all-ones readback means "no chip", which is more
	 * useful to report than a bare timeout.
	 */
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	} else if (err) {
		return err;
	}

	return 0;
}
566
/* Perform one soft reset attempt: wake the oscillator, enter Config
 * Mode, then issue the SPI Reset command.
 */
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
{
	const __be16 cmd = mcp251xfd_cmd_reset();
	int err;

	/* The Set Mode and SPI Reset command only seems to works if
	 * the controller is not in Sleep Mode.
	 */
	err = mcp251xfd_chip_clock_enable(priv);
	if (err)
		return err;

	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
	if (err)
		return err;

	/* spi_write_then_read() works with non DMA-safe buffers */
	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
}
586
/* Verify that the soft reset took effect: the chip must be in Config
 * Mode and the OSC register must contain its power-on default value.
 * Returns -ETIMEDOUT on mismatch so the caller retries the reset.
 */
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference;
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
		netdev_info(priv->ndev,
			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
			    mcp251xfd_get_mode_str(mode), mode);
		return -ETIMEDOUT;
	}

	/* Expected power-on default: oscillator ready, clock output
	 * divisor 10.
	 */
	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);

	/* check reset defaults of OSC reg */
	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc != osc_reference) {
		netdev_info(priv->ndev,
			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
			    osc, osc_reference);
		return -ETIMEDOUT;
	}

	return 0;
}
622
mcp251xfd_chip_softreset(const struct mcp251xfd_priv * priv)623 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
624 {
625 int err, i;
626
627 for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
628 if (i)
629 netdev_info(priv->ndev,
630 "Retrying to reset Controller.\n");
631
632 err = mcp251xfd_chip_softreset_do(priv);
633 if (err == -ETIMEDOUT)
634 continue;
635 if (err)
636 return err;
637
638 err = mcp251xfd_chip_softreset_check(priv);
639 if (err == -ETIMEDOUT)
640 continue;
641 if (err)
642 return err;
643
644 return 0;
645 }
646
647 if (err)
648 return err;
649
650 return -ETIMEDOUT;
651 }
652
/* Configure the oscillator (Low Power Mode on oscillator disable,
 * clock output divisor 10) and enable the Time Base Counter with
 * prescaler 1.
 */
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
	u32 osc;
	int err;

	/* Activate Low Power Mode on Oscillator Disable. This only
	 * works on the MCP2518FD. The MCP2517FD will go into normal
	 * Sleep Mode instead.
	 */
	osc = MCP251XFD_REG_OSC_LPMEN |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Set Time Base Counter Prescaler to 1.
	 *
	 * This means an overflow of the 32 bit Time Base Counter
	 * register at 40 MHz every 107 seconds.
	 */
	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
			    MCP251XFD_REG_TSCON_TBCEN);
}
677
/* Program CON (which also requests Configuration Mode via REQOP),
 * the nominal bit timing, and — in FD mode — the data bit timing and
 * transmitter delay compensation, from the parameters the CAN core
 * computed into priv->can.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only non-ISO FD mode leaves it off. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time: all hardware fields are value-minus-one */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	/* Data bit timing and TDC are only relevant in CAN-FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation: offset derived from the data
	 * phase sample point, clamped to the register's signed range.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
755
/* Configure the chip's GPIO pins so PIN1 acts as the dedicated RX
 * interrupt line; no-op if no rx-int line was configured.
 */
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input/RX Interrupt
	 *
	 * PIN1 must be Input, otherwise there is a glitch on the
	 * rx-INT line. It happens between setting the PIN as output
	 * (in the first byte of the SPI transfer) and configuring the
	 * PIN as interrupt (in the last byte of the SPI transfer).
	 */
	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
		MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
776
/* Revert PIN1 to a plain GPIO input, disabling the dedicated RX
 * interrupt line; no-op if no rx-int line was configured.
 */
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input
	 */
	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
792
/* Configure one hardware RX FIFO: depth, timestamping, overflow and
 * not-empty interrupts, and a payload size matching the current CAN
 * mode (64 bytes for FD/listen-only, 8 bytes for Classic CAN).
 */
static int
mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring)
{
	u32 fifo_con;

	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
	 *
	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
	 * generate a RXOVIF, use this to properly detect RX MAB
	 * overflows.
	 */
	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			      ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_RXTSEN |
		MCP251XFD_REG_FIFOCON_RXOVIE |
		MCP251XFD_REG_FIFOCON_TFNRFNIE;

	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_8);

	return regmap_write(priv->map_reg,
			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
}
821
/* Enable filter ring->nr and point it at the ring's FIFO. The FLTCON
 * register index is ring->nr / 4, i.e. presumably each FLTCON register
 * holds the configuration of four filters — only this filter's byte is
 * updated via the per-filter mask.
 */
static int
mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
				  const struct mcp251xfd_rx_ring *ring)
{
	u32 fltcon;

	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);

	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
				  fltcon);
}
836
/* Configure all hardware FIFOs: the transmit event FIFO (TEF), the TX
 * FIFO (payload size and retransmission attempts per CAN mode), and
 * every RX FIFO with its matching filter.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF: one event slot per TX object, with timestamping,
	 * overflow and not-empty interrupts.
	 */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* Payload size: 64 bytes in FD/listen-only mode, 8 otherwise
	 * — must match the object sizes chosen in
	 * mcp251xfd_ring_alloc().
	 */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* One-shot mode limits retransmission attempts. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
894
/* Initialize ECC: enable it if the chip supports it (QUIRK_ECC),
 * disable it otherwise, and zero-fill the whole controller RAM so
 * every word carries valid ECC parity before first use.
 */
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	void *ram;
	u32 val = 0;
	int err;

	/* Clear any stale ECC error state from a previous run. */
	ecc->ecc_stat = 0;

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
		val = MCP251XFD_REG_ECCCON_ECCEN;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 MCP251XFD_REG_ECCCON_ECCEN, val);
	if (err)
		return err;

	/* Write a zeroed buffer over the full RAM in one raw transfer. */
	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
	if (!ram)
		return -ENOMEM;

	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
			       MCP251XFD_RAM_SIZE);
	kfree(ram);

	return err;
}
922
mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv * priv)923 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
924 {
925 struct mcp251xfd_ecc *ecc = &priv->ecc;
926
927 ecc->ecc_stat = 0;
928 }
929
mcp251xfd_get_normal_mode(const struct mcp251xfd_priv * priv)930 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
931 {
932 u8 mode;
933
934 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
935 mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
936 else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
937 mode = MCP251XFD_REG_CON_MODE_MIXED;
938 else
939 mode = MCP251XFD_REG_CON_MODE_CAN2_0;
940
941 return mode;
942 }
943
944 static int
__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv * priv,bool nowait)945 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
946 bool nowait)
947 {
948 u8 mode;
949
950 mode = mcp251xfd_get_normal_mode(priv);
951
952 return __mcp251xfd_chip_set_mode(priv, mode, nowait);
953 }
954
/* Enter normal operation mode and wait for the transition. */
static inline int
mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, false);
}
960
/* Request normal operation mode without waiting for the transition. */
static inline int
mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, true);
}
966
/* Enable the interrupt sources the driver handles: SPI CRC errors,
 * ECC single/double bit errors, and the main interrupt enables in the
 * INT register. Bus error reporting (IVMIE) only if requested via
 * ctrlmode.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
997
/* Disable all interrupt sources, in reverse order of
 * mcp251xfd_chip_interrupts_enable(): main INT register first, then
 * ECC, then SPI CRC.
 */
static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
{
	int err;
	u32 mask;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
	if (err)
		return err;

	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 mask, 0x0);
	if (err)
		return err;

	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
1015
/* Stop the chip: record the new CAN state, disable interrupts and the
 * RX interrupt line (best effort — their errors are deliberately
 * ignored), then put the controller into Sleep Mode.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1025
/* Full chip bring-up sequence: soft reset, clock and bit timing
 * configuration, RX interrupt line, ECC/RAM init, ring and FIFO setup,
 * and finally the switch to normal operation mode. On any failure the
 * chip is stopped again.
 *
 * Note: interrupts are NOT enabled here — the caller does that via
 * mcp251xfd_chip_interrupts_enable() (see mcp251xfd_set_mode()).
 */
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
{
	int err;

	err = mcp251xfd_chip_softreset(priv);
	if (err)
		goto out_chip_stop;

	err = mcp251xfd_chip_clock_init(priv);
	if (err)
		goto out_chip_stop;

	err = mcp251xfd_set_bittiming(priv);
	if (err)
		goto out_chip_stop;

	err = mcp251xfd_chip_rx_int_enable(priv);
	if (err)
		goto out_chip_stop;

	err = mcp251xfd_chip_ecc_init(priv);
	if (err)
		goto out_chip_stop;

	mcp251xfd_ring_init(priv);

	err = mcp251xfd_chip_fifo_init(priv);
	if (err)
		goto out_chip_stop;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	err = mcp251xfd_chip_set_normal_mode(priv);
	if (err)
		goto out_chip_stop;

	return 0;

 out_chip_stop:
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);

	return err;
}
1069
mcp251xfd_set_mode(struct net_device * ndev,enum can_mode mode)1070 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1071 {
1072 struct mcp251xfd_priv *priv = netdev_priv(ndev);
1073 int err;
1074
1075 switch (mode) {
1076 case CAN_MODE_START:
1077 err = mcp251xfd_chip_start(priv);
1078 if (err)
1079 return err;
1080
1081 err = mcp251xfd_chip_interrupts_enable(priv);
1082 if (err) {
1083 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1084 return err;
1085 }
1086
1087 netif_wake_queue(ndev);
1088 break;
1089
1090 default:
1091 return -EOPNOTSUPP;
1092 }
1093
1094 return 0;
1095 }
1096
__mcp251xfd_get_berr_counter(const struct net_device * ndev,struct can_berr_counter * bec)1097 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1098 struct can_berr_counter *bec)
1099 {
1100 const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1101 u32 trec;
1102 int err;
1103
1104 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1105 if (err)
1106 return err;
1107
1108 if (trec & MCP251XFD_REG_TREC_TXBO)
1109 bec->txerr = 256;
1110 else
1111 bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1112 bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1113
1114 return 0;
1115 }
1116
mcp251xfd_get_berr_counter(const struct net_device * ndev,struct can_berr_counter * bec)1117 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1118 struct can_berr_counter *bec)
1119 {
1120 const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1121
1122 /* Avoid waking up the controller if the interface is down */
1123 if (!(ndev->flags & IFF_UP))
1124 return 0;
1125
1126 /* The controller is powered down during Bus Off, use saved
1127 * bec values.
1128 */
1129 if (priv->can.state == CAN_STATE_BUS_OFF) {
1130 *bec = priv->bec;
1131 return 0;
1132 }
1133
1134 return __mcp251xfd_get_berr_counter(ndev, bec);
1135 }
1136
mcp251xfd_check_tef_tail(const struct mcp251xfd_priv * priv)1137 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1138 {
1139 u8 tef_tail_chip, tef_tail;
1140 int err;
1141
1142 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1143 return 0;
1144
1145 err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1146 if (err)
1147 return err;
1148
1149 tef_tail = mcp251xfd_get_tef_tail(priv);
1150 if (tef_tail_chip != tef_tail) {
1151 netdev_err(priv->ndev,
1152 "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1153 tef_tail_chip, tef_tail);
1154 return -EILSEQ;
1155 }
1156
1157 return 0;
1158 }
1159
1160 static int
mcp251xfd_check_rx_tail(const struct mcp251xfd_priv * priv,const struct mcp251xfd_rx_ring * ring)1161 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1162 const struct mcp251xfd_rx_ring *ring)
1163 {
1164 u8 rx_tail_chip, rx_tail;
1165 int err;
1166
1167 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1168 return 0;
1169
1170 err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1171 if (err)
1172 return err;
1173
1174 rx_tail = mcp251xfd_get_rx_tail(ring);
1175 if (rx_tail_chip != rx_tail) {
1176 netdev_err(priv->ndev,
1177 "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1178 rx_tail_chip, rx_tail);
1179 return -EILSEQ;
1180 }
1181
1182 return 0;
1183 }
1184
1185 static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv * priv,const u32 seq)1186 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1187 {
1188 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1189 u32 tef_sta;
1190 int err;
1191
1192 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1193 if (err)
1194 return err;
1195
1196 if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1197 netdev_err(priv->ndev,
1198 "Transmit Event FIFO buffer overflow.\n");
1199 return -ENOBUFS;
1200 }
1201
1202 netdev_info(priv->ndev,
1203 "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
1204 tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1205 "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1206 "not empty" : "empty",
1207 seq, priv->tef.tail, priv->tef.head, tx_ring->head);
1208
1209 /* The Sequence Number in the TEF doesn't match our tef_tail. */
1210 return -EAGAIN;
1211 }
1212
/* Process a single TEF (Transmit Event FIFO) object: verify its
 * sequence number, complete the echoed TX skb, and advance the TEF
 * pointer both on the chip (UINC) and in the driver.
 *
 * Returns the result of mcp251xfd_handle_tefif_recover() (-EAGAIN or
 * -ENOBUFS) if the sequence number doesn't match our TEF tail.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 seq, seq_masked, tef_tail_masked;
	int err;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef.tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	/* Hand the echo skb back to the stack and account the TX. */
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    mcp251xfd_get_tef_tail(priv),
					    hw_tef_obj->ts);
	stats->tx_packets++;

	/* finally increment the TEF pointer */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON,
				 GENMASK(15, 8),
				 MCP251XFD_REG_TEFCON_UINC);
	if (err)
		return err;

	priv->tef.tail++;
	tx_ring->tail++;

	return mcp251xfd_check_tef_tail(priv);
}
1254
/* Update the driver's TEF head from the chip's TX FIFO tail, taking
 * care of the ring wrap-around.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef.head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef.head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1278
1279 static inline int
mcp251xfd_tef_obj_read(const struct mcp251xfd_priv * priv,struct mcp251xfd_hw_tef_obj * hw_tef_obj,const u8 offset,const u8 len)1280 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1281 struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1282 const u8 offset, const u8 len)
1283 {
1284 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1285
1286 if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1287 (offset > tx_ring->obj_num ||
1288 len > tx_ring->obj_num ||
1289 offset + len > tx_ring->obj_num)) {
1290 netdev_err(priv->ndev,
1291 "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
1292 tx_ring->obj_num, offset, len);
1293 return -ERANGE;
1294 }
1295
1296 return regmap_bulk_read(priv->map_rx,
1297 mcp251xfd_get_tef_obj_addr(offset),
1298 hw_tef_obj,
1299 sizeof(*hw_tef_obj) / sizeof(u32) * len);
1300 }
1301
/* Handle a Transmit Event FIFO interrupt: read all pending TEF
 * objects (in up to two chunks if the range wraps around the ring)
 * and process them one by one.
 *
 * The out_netif_wake_queue label is reached both by falling through
 * after the loop and via the -EAGAIN goto; in either case any freed
 * TX slots may allow the queue to be woken.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	/* Range wraps around the ring: read the rest from index 0. */
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;
	}

 out_netif_wake_queue:
	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1351
/* Update the driver's RX ring head from the chip's RX FIFO head,
 * taking care of the ring wrap-around.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1375
1376 static void
mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv * priv,const struct mcp251xfd_hw_rx_obj_canfd * hw_rx_obj,struct sk_buff * skb)1377 mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
1378 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1379 struct sk_buff *skb)
1380 {
1381 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1382
1383 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
1384 u32 sid, eid;
1385
1386 eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1387 sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1388
1389 cfd->can_id = CAN_EFF_FLAG |
1390 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1391 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1392 } else {
1393 cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1394 hw_rx_obj->id);
1395 }
1396
1397 /* CANFD */
1398 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1399 u8 dlc;
1400
1401 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1402 cfd->flags |= CANFD_ESI;
1403
1404 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1405 cfd->flags |= CANFD_BRS;
1406
1407 dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
1408 cfd->len = can_dlc2len(get_canfd_dlc(dlc));
1409 } else {
1410 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1411 cfd->can_id |= CAN_RTR_FLAG;
1412
1413 cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
1414 hw_rx_obj->flags));
1415 }
1416
1417 memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1418 }
1419
/* Convert a single hardware RX object into an skb, queue it into the
 * rx-offload layer, then advance the ring tail and ACK the FIFO on
 * the chip (UINC).
 *
 * An skb allocation failure is not an error: the frame is dropped
 * and accounted in rx_dropped.
 */
static int
mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
			  struct mcp251xfd_rx_ring *ring,
			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct canfd_frame *cfd;
	int err;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
		skb = alloc_canfd_skb(priv->ndev, &cfd);
	else
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);

	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
	/* Queue sorted by hardware timestamp. */
	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
	if (err)
		stats->rx_fifo_errors++;

	ring->tail++;

	/* finally increment the RX pointer */
	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FIFOCON(ring->fifo_nr),
				  GENMASK(15, 8),
				  MCP251XFD_REG_FIFOCON_UINC);
}
1453
1454 static inline int
mcp251xfd_rx_obj_read(const struct mcp251xfd_priv * priv,const struct mcp251xfd_rx_ring * ring,struct mcp251xfd_hw_rx_obj_canfd * hw_rx_obj,const u8 offset,const u8 len)1455 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1456 const struct mcp251xfd_rx_ring *ring,
1457 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1458 const u8 offset, const u8 len)
1459 {
1460 int err;
1461
1462 err = regmap_bulk_read(priv->map_rx,
1463 mcp251xfd_get_rx_obj_addr(ring, offset),
1464 hw_rx_obj,
1465 len * ring->obj_size / sizeof(u32));
1466
1467 return err;
1468 }
1469
/* Drain all pending RX objects of one RX ring.
 *
 * Objects are read in linear chunks (each chunk ends at the ring
 * wrap-around) and handed to mcp251xfd_handle_rxif_one().
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* Loop until no contiguous chunk of pending objects is left. */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}
	}

	return 0;
}
1501
mcp251xfd_handle_rxif(struct mcp251xfd_priv * priv)1502 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1503 {
1504 struct mcp251xfd_rx_ring *ring;
1505 int err, n;
1506
1507 mcp251xfd_for_each_rx_ring(priv, ring, n) {
1508 err = mcp251xfd_handle_rxif_ring(priv, ring);
1509 if (err)
1510 return err;
1511 }
1512
1513 return 0;
1514 }
1515
/* Read the chip's free-running Time Base Counter (TBC register). */
static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
					  u32 *timestamp)
{
	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
}
1521
1522 static struct sk_buff *
mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv * priv,struct can_frame ** cf,u32 * timestamp)1523 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
1524 struct can_frame **cf, u32 *timestamp)
1525 {
1526 int err;
1527
1528 err = mcp251xfd_get_timestamp(priv, timestamp);
1529 if (err)
1530 return NULL;
1531
1532 return alloc_can_err_skb(priv->ndev, cf);
1533 }
1534
mcp251xfd_handle_rxovif(struct mcp251xfd_priv * priv)1535 static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
1536 {
1537 struct net_device_stats *stats = &priv->ndev->stats;
1538 struct mcp251xfd_rx_ring *ring;
1539 struct sk_buff *skb;
1540 struct can_frame *cf;
1541 u32 timestamp, rxovif;
1542 int err, i;
1543
1544 stats->rx_over_errors++;
1545 stats->rx_errors++;
1546
1547 err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
1548 if (err)
1549 return err;
1550
1551 mcp251xfd_for_each_rx_ring(priv, ring, i) {
1552 if (!(rxovif & BIT(ring->fifo_nr)))
1553 continue;
1554
1555 /* If SERRIF is active, there was a RX MAB overflow. */
1556 if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
1557 netdev_info(priv->ndev,
1558 "RX-%d: MAB overflow detected.\n",
1559 ring->nr);
1560 } else {
1561 netdev_info(priv->ndev,
1562 "RX-%d: FIFO overflow.\n", ring->nr);
1563 }
1564
1565 err = regmap_update_bits(priv->map_reg,
1566 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
1567 MCP251XFD_REG_FIFOSTA_RXOVIF,
1568 0x0);
1569 if (err)
1570 return err;
1571 }
1572
1573 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp);
1574 if (!skb)
1575 return 0;
1576
1577 cf->can_id |= CAN_ERR_CRTL;
1578 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
1579
1580 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1581 if (err)
1582 stats->rx_fifo_errors++;
1583
1584 return 0;
1585 }
1586
/* TX attempt interrupt: currently only logged, no further handling. */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1593
mcp251xfd_handle_ivmif(struct mcp251xfd_priv * priv)1594 static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
1595 {
1596 struct net_device_stats *stats = &priv->ndev->stats;
1597 u32 bdiag1, timestamp;
1598 struct sk_buff *skb;
1599 struct can_frame *cf = NULL;
1600 int err;
1601
1602 err = mcp251xfd_get_timestamp(priv, ×tamp);
1603 if (err)
1604 return err;
1605
1606 err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
1607 if (err)
1608 return err;
1609
1610 /* Write 0s to clear error bits, don't write 1s to non active
1611 * bits, as they will be set.
1612 */
1613 err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
1614 if (err)
1615 return err;
1616
1617 priv->can.can_stats.bus_error++;
1618
1619 skb = alloc_can_err_skb(priv->ndev, &cf);
1620 if (cf)
1621 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1622
1623 /* Controller misconfiguration */
1624 if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
1625 netdev_err(priv->ndev,
1626 "recv'd DLC is larger than PLSIZE of FIFO element.");
1627
1628 /* RX errors */
1629 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
1630 MCP251XFD_REG_BDIAG1_NCRCERR)) {
1631 netdev_dbg(priv->ndev, "CRC error\n");
1632
1633 stats->rx_errors++;
1634 if (cf)
1635 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
1636 }
1637 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
1638 MCP251XFD_REG_BDIAG1_NSTUFERR)) {
1639 netdev_dbg(priv->ndev, "Stuff error\n");
1640
1641 stats->rx_errors++;
1642 if (cf)
1643 cf->data[2] |= CAN_ERR_PROT_STUFF;
1644 }
1645 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
1646 MCP251XFD_REG_BDIAG1_NFORMERR)) {
1647 netdev_dbg(priv->ndev, "Format error\n");
1648
1649 stats->rx_errors++;
1650 if (cf)
1651 cf->data[2] |= CAN_ERR_PROT_FORM;
1652 }
1653
1654 /* TX errors */
1655 if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
1656 netdev_dbg(priv->ndev, "NACK error\n");
1657
1658 stats->tx_errors++;
1659 if (cf) {
1660 cf->can_id |= CAN_ERR_ACK;
1661 cf->data[2] |= CAN_ERR_PROT_TX;
1662 }
1663 }
1664 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
1665 MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
1666 netdev_dbg(priv->ndev, "Bit1 error\n");
1667
1668 stats->tx_errors++;
1669 if (cf)
1670 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
1671 }
1672 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
1673 MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
1674 netdev_dbg(priv->ndev, "Bit0 error\n");
1675
1676 stats->tx_errors++;
1677 if (cf)
1678 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
1679 }
1680
1681 if (!cf)
1682 return 0;
1683
1684 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1685 if (err)
1686 stats->rx_fifo_errors++;
1687
1688 return 0;
1689 }
1690
mcp251xfd_handle_cerrif(struct mcp251xfd_priv * priv)1691 static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
1692 {
1693 struct net_device_stats *stats = &priv->ndev->stats;
1694 struct sk_buff *skb;
1695 struct can_frame *cf = NULL;
1696 enum can_state new_state, rx_state, tx_state;
1697 u32 trec, timestamp;
1698 int err;
1699
1700 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1701 if (err)
1702 return err;
1703
1704 if (trec & MCP251XFD_REG_TREC_TXBO)
1705 tx_state = CAN_STATE_BUS_OFF;
1706 else if (trec & MCP251XFD_REG_TREC_TXBP)
1707 tx_state = CAN_STATE_ERROR_PASSIVE;
1708 else if (trec & MCP251XFD_REG_TREC_TXWARN)
1709 tx_state = CAN_STATE_ERROR_WARNING;
1710 else
1711 tx_state = CAN_STATE_ERROR_ACTIVE;
1712
1713 if (trec & MCP251XFD_REG_TREC_RXBP)
1714 rx_state = CAN_STATE_ERROR_PASSIVE;
1715 else if (trec & MCP251XFD_REG_TREC_RXWARN)
1716 rx_state = CAN_STATE_ERROR_WARNING;
1717 else
1718 rx_state = CAN_STATE_ERROR_ACTIVE;
1719
1720 new_state = max(tx_state, rx_state);
1721 if (new_state == priv->can.state)
1722 return 0;
1723
1724 /* The skb allocation might fail, but can_change_state()
1725 * handles cf == NULL.
1726 */
1727 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp);
1728 can_change_state(priv->ndev, cf, tx_state, rx_state);
1729
1730 if (new_state == CAN_STATE_BUS_OFF) {
1731 /* As we're going to switch off the chip now, let's
1732 * save the error counters and return them to
1733 * userspace, if do_get_berr_counter() is called while
1734 * the chip is in Bus Off.
1735 */
1736 err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
1737 if (err)
1738 return err;
1739
1740 mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
1741 can_bus_off(priv->ndev);
1742 }
1743
1744 if (!skb)
1745 return 0;
1746
1747 if (new_state != CAN_STATE_BUS_OFF) {
1748 struct can_berr_counter bec;
1749
1750 err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
1751 if (err)
1752 return err;
1753 cf->data[6] = bec.txerr;
1754 cf->data[7] = bec.rxerr;
1755 }
1756
1757 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1758 if (err)
1759 stats->rx_fifo_errors++;
1760
1761 return 0;
1762 }
1763
/* Handle a Mode Change interrupt.
 *
 * If the chip left the requested mode unexpectedly (e.g. due to a TX
 * MAB underflow, see the errata notes below), request Normal mode
 * again — either immediately, or deferred via *set_normal_mode to
 * mcp251xfd_handle_eccif() if a pending ECC error in TX-RAM must be
 * repaired first.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the Controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1818
/* Handle a System Error interrupt (SERRIF).
 *
 * Classifies the error as TX MAB underflow or RX MAB overflow based
 * on the other pending interrupt flags (see the errata notes below)
 * and updates the statistics accordingly.  Always returns 0; an
 * unclassifiable SERRIF is only logged.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all of these as known system errors.
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all of these as known system errors.
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1896
/* Recover from an ECC error in TX-RAM: re-upload the affected TX
 * object to the controller and request Normal mode again, which
 * triggers the retransmission.
 *
 * Returns -EINVAL if the error address cannot be consistently
 * matched against the TX object the chip is about to (re)send.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* Ring distance between the faulting object and the chip's
	 * TX tail (obj_num is a power of two).
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
1945
/* Handle an ECC interrupt (single error corrected or double error
 * detected).
 *
 * Per the errata cited below, both cases are treated only as a
 * notification that the RAM word at ERRADDR was corrupted.  If the
 * corruption is in TX-RAM and keeps re-occurring, recovery is
 * attempted by reloading the TX object.
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* ACK: write 0 to the active IF bits, 1 to all others. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Mode change was deferred by mcp251xfd_handle_modif(). */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2021
mcp251xfd_handle_spicrcif(struct mcp251xfd_priv * priv)2022 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2023 {
2024 int err;
2025 u32 crc;
2026
2027 err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2028 if (err)
2029 return err;
2030
2031 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2032 MCP251XFD_REG_CRC_IF_MASK,
2033 ~crc);
2034 if (err)
2035 return err;
2036
2037 if (crc & MCP251XFD_REG_CRC_FERRIF)
2038 netdev_notice(priv->ndev, "CRC write command format error.\n");
2039 else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2040 netdev_notice(priv->ndev,
2041 "CRC write error detected. CRC=0x%04lx.\n",
2042 FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2043
2044 return 0;
2045 }
2046
/* Invoke the matching mcp251xfd_handle_<irq>() handler and log an
 * error message if it fails.  Evaluates to the handler's return
 * value.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			   "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			   __stringify(irq), err); \
	err; \
})
2059
/* Threaded IRQ handler: drains the optional RX_INT line first, then
 * loops reading the INT register block until no enabled interrupt
 * source is pending anymore.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	irqreturn_t handled = IRQ_NONE;
	int err;

	/* If the RX_INT GPIO is wired up, handle pending RX frames
	 * directly while the line is asserted, without touching the
	 * INT register.
	 */
	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Bulk-read INT and the following status registers
		 * into priv->regs_status in one SPI transfer.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       sizeof(u32));
		if (err)
			goto out_fail;

		/* Only act on sources that are both flagged (IF) and
		 * enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		/* MODIF may request switching back to Normal mode; the
		 * decision is consumed by the eccif handler below.
		 */
		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2204
2205 static inline struct
mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring * tx_ring)2206 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2207 {
2208 u8 tx_head;
2209
2210 tx_head = mcp251xfd_get_tx_head(tx_ring);
2211
2212 return &tx_ring->obj[tx_head];
2213 }
2214
/* Convert a CAN(-FD) skb into the controller's hardware TX object
 * layout inside tx_obj->buf and set the SPI transfer length
 * (tx_obj->xfer[0].len) accordingly. "seq" is echoed back by the
 * hardware in the TEF object to match completions to frames.
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int offset, len;

	/* Split an extended CAN ID into the chip's SID/EID fields,
	 * standard IDs only fill SID.
	 */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	dlc = can_len2dlc(cfd->len);
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
		FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;
	}

	/* With the CRC_TX quirk the object sits behind a CRC-framed
	 * SPI command, otherwise behind a plain one.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	/* Controller registers/RAM are little endian. */
	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Clear data at end of CAN frame */
	offset = round_down(cfd->len, sizeof(u32));
	len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
	if (MCP251XFD_SANITIZE_CAN && len)
		memset(hw_tx_obj->data + offset, 0x0, len);
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(can_dlc2len(dlc), sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2306
mcp251xfd_tx_obj_write(const struct mcp251xfd_priv * priv,struct mcp251xfd_tx_obj * tx_obj)2307 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
2308 struct mcp251xfd_tx_obj *tx_obj)
2309 {
2310 return spi_async(priv->spi, &tx_obj->msg);
2311 }
2312
/* Check whether the TX ring is full; if so stop the netif queue and
 * re-check under a memory barrier to close the race with the TEF
 * completion path updating the tail. Returns true if the caller must
 * report NETDEV_TX_BUSY.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	/* A slot freed up in the meantime, resume the queue. */
	netif_start_queue(priv->ndev);

	return false;
}
2337
/* .ndo_start_xmit: build a hardware TX object from the skb, reserve a
 * ring slot, park the skb in the echo stack and kick off the async
 * SPI write.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	/* tx_ring->head doubles as the sequence number echoed back in
	 * the TEF object.
	 */
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
		netif_stop_queue(ndev);

	can_put_echo_skb(skb, ndev, tx_head);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	/* NOTE(review): on spi_async() failure the echo skb stored
	 * above and the reserved head slot are not released — looks
	 * like a potential leak/stall until restart; confirm against
	 * the TEF handling.
	 */
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2375
/* .ndo_open: power up via runtime PM, allocate rings, enable the
 * transceiver, start the chip, then hook up IRQ handling. Unwinds in
 * reverse order on failure (goto ladder).
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		/* get_sync() takes a reference even on failure. */
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	can_rx_offload_enable(&priv->offload);

	/* The handler sleeps on SPI transfers, hence threaded IRQ. */
	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2436
/* .ndo_stop: tear down in reverse order of mcp251xfd_open() and drop
 * the runtime PM reference taken there.
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	/* Mask chip interrupts before releasing the IRQ line. */
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2454
/* Network device callbacks. */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit = mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2461
2462 static void
mcp251xfd_register_quirks(struct mcp251xfd_priv * priv)2463 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2464 {
2465 const struct spi_device *spi = priv->spi;
2466 const struct spi_controller *ctlr = spi->controller;
2467
2468 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2469 priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2470 }
2471
/* Autodetect the chip model via the OSC_LPMEN bit, fix up
 * priv->devtype_data if firmware specified a different model, and
 * re-init the regmap with the detected model's quirks.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	/* If the bit stuck, the chip supports Low Power Mode, i.e. it
	 * is a MCP2518FD.
	 */
	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.\n",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2512
mcp251xfd_register_check_rx_int(struct mcp251xfd_priv * priv)2513 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
2514 {
2515 int err, rx_pending;
2516
2517 if (!priv->rx_int)
2518 return 0;
2519
2520 err = mcp251xfd_chip_rx_int_enable(priv);
2521 if (err)
2522 return err;
2523
2524 /* Check if RX_INT is properly working. The RX_INT should not
2525 * be active after a softreset.
2526 */
2527 rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2528
2529 err = mcp251xfd_chip_rx_int_disable(priv);
2530 if (err)
2531 return err;
2532
2533 if (!rx_pending)
2534 return 0;
2535
2536 netdev_info(priv->ndev,
2537 "RX_INT active after softreset, disabling RX_INT support.\n");
2538 devm_gpiod_put(&priv->spi->dev, priv->rx_int);
2539 priv->rx_int = NULL;
2540
2541 return 0;
2542 }
2543
2544 static int
mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv * priv,u32 * dev_id,u32 * effective_speed_hz)2545 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2546 u32 *dev_id, u32 *effective_speed_hz)
2547 {
2548 struct mcp251xfd_map_buf_nocrc *buf_rx;
2549 struct mcp251xfd_map_buf_nocrc *buf_tx;
2550 struct spi_transfer xfer[2] = { };
2551 int err;
2552
2553 buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2554 if (!buf_rx)
2555 return -ENOMEM;
2556
2557 buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2558 if (!buf_tx) {
2559 err = -ENOMEM;
2560 goto out_kfree_buf_rx;
2561 }
2562
2563 xfer[0].tx_buf = buf_tx;
2564 xfer[0].len = sizeof(buf_tx->cmd);
2565 xfer[1].rx_buf = buf_rx->data;
2566 xfer[1].len = sizeof(dev_id);
2567
2568 mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2569 err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2570 if (err)
2571 goto out_kfree_buf_tx;
2572
2573 *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2574 *effective_speed_hz = xfer->effective_speed_hz;
2575
2576 out_kfree_buf_tx:
2577 kfree(buf_tx);
2578 out_kfree_buf_rx:
2579 kfree(buf_rx);
2580
2581 return err;
2582 }
2583
/* '+' if the given quirk is active on this device, '-' otherwise;
 * used for the one-line summary in mcp251xfd_register_done().
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2586
/* Log a one-line summary of the detected chip: model, silicon
 * revision, active quirks and the various clock/SPI frequencies
 * (c: CAN clock, m: original max SPI, r: clamped SPI, e: effective).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2621
/* Power up the chip, detect the exact model, register the CAN netdev
 * and put the controller back to sleep. Unwinds via the goto ladder
 * on failure.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	/* Mark the device active for runtime PM without triggering a
	 * resume (clocks/vdd are already on).
	 */
	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means no chip responded at all: skip the sleep-mode
	 * write on the way out.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2685
/* Undo mcp251xfd_register(): unregister the netdev, then balance the
 * runtime PM state and power down clocks and regulators.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;

	unregister_candev(ndev);

	/* Resume once more so the subsequent noidle put leaves the
	 * device active while we turn the supplies off ourselves.
	 */
	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2697
/* Device tree compatibles; "microchip,mcp251xfd" triggers model
 * autodetection (see mcp251xfd_register_chip_detect()).
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2713
/* SPI device IDs for non-DT instantiation; mirrors the OF table. */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2729
/* SPI probe: gather optional resources (RX_INT GPIO, vdd/xceiver
 * regulators, oscillator clock), validate the clock rate, allocate
 * and populate the CAN netdev, clamp the SPI clock per errata, and
 * register the device.
 */
static int mcp251xfd_probe(struct spi_device *spi)
{
	const void *match;
	struct net_device *ndev;
	struct mcp251xfd_priv *priv;
	struct gpio_desc *rx_int;
	struct regulator *reg_vdd, *reg_xceiver;
	struct clk *clk;
	u32 freq;
	int err;

	if (!spi->irq)
		return dev_err_probe(&spi->dev, -ENXIO,
				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");

	/* RX_INT GPIO and both regulators are optional: absence is
	 * fine (NULL), but deferral and real errors must propagate.
	 */
	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
					 GPIOD_IN);
	if (PTR_ERR(rx_int) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	else if (IS_ERR(rx_int))
		return PTR_ERR(rx_int);

	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
	if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	else if (PTR_ERR(reg_vdd) == -ENODEV)
		reg_vdd = NULL;
	else if (IS_ERR(reg_vdd))
		return PTR_ERR(reg_vdd);

	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	else if (PTR_ERR(reg_xceiver) == -ENODEV)
		reg_xceiver = NULL;
	else if (IS_ERR(reg_xceiver))
		return PTR_ERR(reg_xceiver);

	clk = devm_clk_get(&spi->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
		return PTR_ERR(clk);
	}
	freq = clk_get_rate(clk);

	/* Sanity check */
	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low or high.\n",
			freq);
		return -ERANGE;
	}

	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
			freq);
		return -ERANGE;
	}

	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
			    MCP251XFD_TX_OBJ_NUM_MAX);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &spi->dev);

	ndev->netdev_ops = &mcp251xfd_netdev_ops;
	ndev->irq = spi->irq;
	ndev->flags |= IFF_ECHO;

	priv = netdev_priv(ndev);
	spi_set_drvdata(spi, priv);
	priv->can.clock.freq = freq;
	priv->can.do_set_mode = mcp251xfd_set_mode;
	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
		CAN_CTRLMODE_FD_NON_ISO;
	priv->ndev = ndev;
	priv->spi = spi;
	priv->rx_int = rx_int;
	priv->clk = clk;
	priv->reg_vdd = reg_vdd;
	priv->reg_xceiver = reg_xceiver;

	/* Model comes from DT match data, or from the SPI device ID
	 * table for non-DT instantiation.
	 */
	match = device_get_match_data(&spi->dev);
	if (match)
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
	else
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
			spi_get_device_id(spi)->driver_data;

	/* Errata Reference:
	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
	 *
	 * The SPI can write corrupted data to the RAM at fast SPI
	 * speeds:
	 *
	 * Simultaneous activity on the CAN bus while writing data to
	 * RAM via the SPI interface, with high SCK frequency, can
	 * lead to corrupted data being written to RAM.
	 *
	 * Fix/Work Around:
	 * Ensure that FSCK is less than or equal to 0.85 *
	 * (FSYSCLK/2).
	 *
	 * Known good and bad combinations are:
	 *
	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	Status	config
	 *
	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 9375000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	18750000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	good	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 9523809 Hz	 95.34%	 28571429 Hz	bad	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
	 *
	 */
	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
	/* Clamp SCK to 85% of SYSCLK/2, see errata above. */
	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
	spi->bits_per_word = 8;
	spi->rt = true;
	err = spi_setup(spi);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_regmap_init(priv);
	if (err)
		goto out_free_candev;

	err = can_rx_offload_add_manual(ndev, &priv->offload,
					MCP251XFD_NAPI_WEIGHT);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_register(priv);
	if (err)
		goto out_can_rx_offload_del;

	return 0;

 out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
 out_free_candev:
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;

	free_candev(ndev);

	return err;
}
2886
/* SPI remove: undo mcp251xfd_probe() in reverse order and restore the
 * original SPI max speed.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
2899
mcp251xfd_runtime_suspend(struct device * device)2900 static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
2901 {
2902 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
2903
2904 return mcp251xfd_clks_and_vdd_disable(priv);
2905 }
2906
mcp251xfd_runtime_resume(struct device * device)2907 static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
2908 {
2909 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
2910
2911 return mcp251xfd_clks_and_vdd_enable(priv);
2912 }
2913
/* Runtime PM only; no system suspend/resume handlers. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
2918
/* SPI driver glue and module metadata. */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
MODULE_LICENSE("GPL v2");
2934