/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
 * Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
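
/*
 * Note (illustrative, see the register enum in c_can.h): each message
 * interface has IF_ENUM_REG_LEN registers listed back to back, so adding
 * one block selects the second interface, e.g. C_CAN_IFACE(COMREQ_REG, 1)
 * resolves to the IF2 copy of the command request register.
 */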

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_SWR		BIT(15)
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* interrupt register */
#define INT_STS_PENDING		0x8000

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)

/* TX buffer setup */
#define IF_COMM_TX		(IF_COMM_ARB | IF_COMM_CONTROL | \
				 IF_COMM_TXRQST |		 \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(31)
#define IF_ARB_MSGXTD		BIT(30)
#define IF_ARB_TRANSMIT		BIT(29)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)

#define IF_MCONT_TX		(IF_MCONT_TXIE | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};
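
/*
 * LEC_UNUSED (7) doubles as a "no new event" sentinel: the driver writes it
 * back into the LEC field of the status register, so reading any other
 * value later indicates a bus event since the last write (see
 * c_can_handle_bus_err() and c_can_poll()).
 */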

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

	if (enable)
		ctrl |= CONTROL_IRQMSK;

	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);

	priv->write_reg32(priv, reg, (cmd << 16) | obj);

	for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
		if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
			return;
		udelay(1);
	}
	netdev_err(dev, "Updating object timed out\n");
}

static inline void c_can_object_get(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd, obj);
}

static inline void c_can_object_put(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}

/*
 * Note: According to documentation clearing TXIE while MSGVAL is set
 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
 * load significantly.
 */
static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	c_can_inval_tx_object(dev, iface, obj);
}

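/*
 * Note on the register layout used below (illustrative, see the Bosch
 * user manual): in the arbitration register a standard 11-bit identifier
 * occupies ID[28:18], hence the shift by 18 for non-extended frames. The
 * IFx data registers are 16 bits wide and hold two payload bytes each, so
 * a 32-bit access on D_CAN spans two register slots, which is why the
 * D_CAN loop advances dreg by 2 for every 4 data bytes.
 */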
static void c_can_setup_tx_object(struct net_device *dev, int iface,
				  struct can_frame *frame, int idx)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u16 ctrl = IF_MCONT_TX | frame->can_dlc;
	bool rtr = frame->can_id & CAN_RTR_FLAG;
	u32 arb = IF_ARB_MSGVAL;
	int i;

	if (frame->can_id & CAN_EFF_FLAG) {
		arb |= frame->can_id & CAN_EFF_MASK;
		arb |= IF_ARB_MSGXTD;
	} else {
		arb |= (frame->can_id & CAN_SFF_MASK) << 18;
	}

	if (!rtr)
		arb |= IF_ARB_TRANSMIT;

	/*
	 * If we change the DIR bit, we need to invalidate the buffer
	 * first, i.e. clear the MSGVAL flag in the arbiter.
	 */
	if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
		u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

		c_can_inval_msg_object(dev, iface, obj);
		change_bit(idx, &priv->tx_dir);
	}

	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);

	if (priv->type == BOSCH_D_CAN) {
		u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);

		for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
			data = (u32)frame->data[i];
			data |= (u32)frame->data[i + 1] << 8;
			data |= (u32)frame->data[i + 2] << 16;
			data |= (u32)frame->data[i + 3] << 24;
			priv->write_reg32(priv, dreg, data);
		}
	} else {
		for (i = 0; i < frame->can_dlc; i += 2) {
			priv->write_reg(priv,
					C_CAN_IFACE(DATA1_REG, iface) + i / 2,
					frame->data[i] |
					(frame->data[i + 1] << 8));
		}
	}
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
						       int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));

	if (arb & IF_ARB_MSGXTD)
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & IF_ARB_TRANSMIT) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

		if (priv->type == BOSCH_D_CAN) {
			for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
				data = priv->read_reg32(priv, dreg);
				frame->data[i] = data;
				frame->data[i + 1] = data >> 8;
				frame->data[i + 2] = data >> 16;
				frame->data[i + 3] = data >> 24;
			}
		} else {
			for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
				data = priv->read_reg(priv, dreg);
				frame->data[i] = data;
				frame->data[i + 1] = data >> 8;
			}
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
				       u32 obj, u32 mask, u32 id, u32 mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	mask |= BIT(29);
	priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);

	id |= IF_ARB_MSGVAL;
	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

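/*
 * Illustration of the TX bookkeeping below: tx_active tracks in-flight TX
 * buffers as a bitmask, bit 0 being the first TX object. With three
 * buffers busy, tx_active = 0b0111, fls() returns 3 and the next frame
 * goes into index 3, i.e. the next higher (lower priority) free buffer.
 * The queue is stopped once the highest index is handed to the hardware.
 */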
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct c_can_priv *priv = netdev_priv(dev);
	u32 idx, obj;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;
	/*
	 * This is not a FIFO. C/D_CAN sends out the buffers
	 * prioritized. The lowest buffer number wins.
	 */
	idx = fls(atomic_read(&priv->tx_active));
	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

	/* If this is the last buffer, stop the xmit queue */
	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
		netif_stop_queue(dev);
	/*
	 * Store the message in the interface so we can call
	 * can_put_echo_skb(). We must do this before we enable
	 * transmit as we might race against do_tx().
	 */
	c_can_setup_tx_object(dev, IF_TX, frame, idx);
	priv->dlc[idx] = frame->can_dlc;
	can_put_echo_skb(skb, dev, idx);

	/* Update the active bits */
	atomic_add((1 << idx), &priv->tx_active);
	/* Start transmission */
	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);

	return NETDEV_TX_OK;
}

static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

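/*
 * Worked example (illustration only): a computed prescaler of bt->brp =
 * 100 gives ten_bit_brp = 99 = 0b0001100011, which is split into brp = 35
 * (low 6 bits, written to the BTR register) and brpe = 1 (high 4 bits,
 * written to the BRP extension register).
 */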
static int c_can_set_bittiming(struct net_device *dev)
{
	unsigned int reg_btr, reg_brpe, ctrl_save;
	u8 brp, brpe, sjw, tseg1, tseg2;
	u32 ten_bit_brp;
	struct c_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	int res;

	/* c_can provides a 6-bit brp and 4-bit brpe fields */
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
			(tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_RCV_EOB);
}

static int c_can_software_reset(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int retry = 0;

	if (priv->type != BOSCH_D_CAN)
		return 0;

	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
	while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
		msleep(20);
		if (retry++ > 100) {
			netdev_err(dev, "CCTRL: software reset failed\n");
			return -EIO;
		}
	}

	return 0;
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	err = c_can_software_reset(dev);
	if (err)
		return err;

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* Clear all internal status */
	atomic_set(&priv->tx_active, 0);
	priv->rxmasked = 0;
	priv->tx_dir = 0;

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;
	struct pinctrl *p;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	/* Setup the command for new messages */
	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Attempt to use "active" if available else use "default" */
	p = pinctrl_get_select(priv->device, "active");
	if (!IS_ERR(p))
		pinctrl_put(p);
	else
		pinctrl_pm_select_default_state(priv->device);

	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_irq_control(priv, false);

	/* put ctrl to init on stop to end ongoing transmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);

	/* deactivate pins */
	pinctrl_pm_select_sleep_state(dev->dev.parent);
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		c_can_irq_control(priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
				ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

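/*
 * TX completion: the INTPND2 readout below covers the second group of 16
 * message objects, which this driver reserves for TX, so bit 0 of the
 * readout corresponds to TX index 0 (object C_CAN_MSG_OBJ_TX_FIRST). This
 * assumes the usual 16/16 RX/TX split set up in
 * c_can_configure_msg_objects().
 */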
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;

	clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);

	while ((idx = ffs(pend))) {
		idx--;
		pend &= ~(1 << idx);
		obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
		c_can_inval_tx_object(dev, IF_RX, obj);
		can_get_echo_skb(dev, idx);
		bytes += priv->dlc[idx];
		pkts++;
	}

	/* Clear the bits in the tx_active mask */
	atomic_sub(clr, &priv->tx_active);

	if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
		netif_wake_queue(dev);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
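/*
 * Example (illustrative): pend = 0x0f0c has set bits 2, 3 and 8..11, so
 * hweight32() = 6 but fls() = 12, i.e. there is a gap. The loop below
 * walks down from the top until the run of set bits ends at bit 8 and
 * returns pend & ~0xff = 0x0f00, so the upper group is handled first and
 * the lower bits are left for the next round.
 */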
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}

static inline void c_can_rx_object_get(struct net_device *dev,
				       struct c_can_priv *priv, u32 obj)
{
	c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
	if (priv->type != BOSCH_D_CAN)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, priv, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove that unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

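/*
 * NEWDAT1 covers the first 16 message objects only, which is all this
 * driver needs because the receive FIFO is limited to 16 objects (see the
 * BUILD_BUG_ON_MSG() in c_can_do_rx_poll() below). Each set bit marks an
 * RX object holding unread data.
 */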
static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

	return pend;
}

/*
 * theory of operation:
 *
 * c_can core saves a received CAN message into the first free message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. To work-around this issue, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16bit register. This is only possible
	 * for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int c_can_handle_state_change(struct net_device *dev,
				enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since CPU wrote 0x7 value to status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all type of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	/* Only read the status register if a status interrupt was pending */
	if (atomic_xchg(&priv->sie_pending, 0)) {
		priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
		/* Ack status on C_CAN. D_CAN is self clearing */
		if (priv->type != BOSCH_D_CAN)
			priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
	} else {
		/* no change detected ... */
		curr = last;
	}

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_irq_control(priv, true);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);
	int reg_int;

	reg_int = priv->read_reg(priv, C_CAN_INT_REG);
	if (!reg_int)
		return IRQ_NONE;

	/* save for later use */
	if (reg_int & INT_STS_PENDING)
		atomic_set(&priv->sie_pending, 1);

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_irq_control(priv, true);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_irq_control(priv, true);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	int err;

	/* Deactivate pins to prevent DRA7 DCAN IP from being
	 * stuck in transition when module is disabled.
	 * Pins are activated in c_can_start() and deactivated
	 * in c_can_stop()
	 */
	pinctrl_pm_select_sleep_state(dev->dev.parent);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (!err)
		devm_can_led_init(dev);
	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");