// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//

#include <linux/bitfield.h>

#include "mcp251xfd.h"

static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				 u8 *tef_tail)
{
	u32 tef_ua;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
	if (err)
		return err;

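	/* TEFUA holds the offset of the next TEF object to be read
	 * (the FIFO tail). The TEF is located at the very start of the
	 * controller's RAM, so dividing by the object size yields the
	 * chip's TEF tail index.
	 */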
	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);

	return 0;
}

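/* Sanity check: compare the locally tracked TEF tail with the tail the
 * chip reports. Only active if CONFIG_CAN_MCP251XFD_SANITY is enabled.
 */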
static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
{
	u8 tef_tail_chip, tef_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	if (tef_tail_chip != tef_tail) {
		netdev_err(priv->ndev,
			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
			   tef_tail_chip, tef_tail);
		return -EILSEQ;
	}

	return 0;
}

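/* Called when the Sequence Number read from the TEF doesn't match the
 * expected tail: distinguish a real TEF overflow from merely having
 * read the TEF objects too early.
 */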
static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	u32 tef_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
	if (err)
		return err;

	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
		netdev_err(priv->ndev,
			   "Transmit Event FIFO buffer overflow.\n");
		return -ENOBUFS;
	}

	netdev_info(priv->ndev,
		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
		    "not empty" : "empty",
		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);

	/* The Sequence Number in the TEF doesn't match our tef_tail. */
	return -EAGAIN;
}

static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
			   unsigned int *frame_len_ptr)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	u32 seq, seq_masked, tef_tail_masked, tef_tail;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits; this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	tef_tail = mcp251xfd_get_tef_tail(priv);
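	/* Attach the hardware timestamp to the echoed skb and complete
	 * the TX echo via rx-offload; the returned length is accounted
	 * in the TX statistics.
	 */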
	skb = priv->can.echo_skb[tef_tail];
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    tef_tail, hw_tef_obj->ts,
					    frame_len_ptr);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}

static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail is the next TX object the HW will send. It only
	 * counts within one lap of the ring, so rebase it onto the
	 * current lap of the TEF head.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}

static inline int
mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
		       const u8 offset, const u8 len)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const int val_bytes = regmap_get_val_bytes(priv->map_rx);

	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
	    (offset > tx_ring->obj_num ||
	     len > tx_ring->obj_num ||
	     offset + len > tx_ring->obj_num)) {
		netdev_err(priv->ndev,
			   "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
			   tx_ring->obj_num, offset, len);
		return -ERANGE;
	}

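	/* regmap_bulk_read() takes the transfer length in register
	 * values, not bytes, hence the division by val_bytes.
	 */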
	return regmap_bulk_read(priv->map_rx,
				mcp251xfd_get_tef_obj_addr(offset),
				hw_tef_obj,
				sizeof(*hw_tef_obj) / val_bytes * len);
}

static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;

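	/* The TEF was handled successfully, so clear any stored ECC
	 * error status.
	 */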
	ecc->ecc_stat = 0;
}

int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	unsigned int total_frame_len = 0;
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

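	/* If the new TEF objects wrap around the end of the ring, "l"
	 * only covers the part up to the end of the ring; read the
	 * remaining objects from the start of the ring.
	 */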
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		unsigned int frame_len = 0;

		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave the loop and
		 * let the interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;

		total_frame_len += frame_len;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		int offset;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		tx_ring->tail += len;
		netdev_completed_queue(priv->ndev, len, total_frame_len);

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

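	/* If TX IRQ coalescing is configured, arm the timer that ends
	 * the coalescing period after the configured delay.
	 */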
	if (priv->tx_coalesce_usecs_irq)
		hrtimer_start(&priv->tx_irq_timer,
			      ns_to_ktime(priv->tx_coalesce_usecs_irq *
					  NSEC_PER_USEC),
			      HRTIMER_MODE_REL);

	return 0;
}