// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/spi/spi.h>
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_RESET_CMD		4
#define SJA1105_SIZE_SPI_MSG_HEADER	4
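/* Upper bound on the payload carried by a single SPI message; larger
 * accesses are split into chunks of at most this size.
 */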
#define SJA1105_SIZE_SPI_MSG_MAXLEN	(64 * 4)

struct sja1105_chunk {
	u8	*buf;
	size_t	len;
	u64	reg_addr;
};

static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
{
	const int size = SJA1105_SIZE_SPI_MSG_HEADER;

	memset(buf, 0, size);

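	/* Control word layout, per the packing calls below: access type in
	 * bit 31, number of 4-byte words to read back in bits 30:25 (ignored
	 * for writes), register address in bits 24:4.
	 */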
	sja1105_pack(buf, &msg->access,     31, 31, size);
	sja1105_pack(buf, &msg->read_count, 30, 25, size);
	sja1105_pack(buf, &msg->address,    24,  4, size);
}

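/* The transfer array is laid out as interleaved pairs: transfer 2 * i is
 * the packed control word (header) for chunk i, and transfer 2 * i + 1 is
 * that chunk's payload. Header buffers live back to back in hdr_bufs.
 */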
#define sja1105_hdr_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk))
#define sja1105_chunk_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk) + 1)
#define sja1105_hdr_buf(hdr_bufs, chunk) \
	((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *		address reg_addr, taking @len bytes from *buf
 * - SPI_READ:  creates and sends an SPI read message from absolute
 *		address reg_addr, writing @len bytes into *buf
 */
static int sja1105_xfer(const struct sja1105_private *priv,
			sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
			size_t len, struct ptp_system_timestamp *ptp_sts)
{
	struct sja1105_chunk chunk = {
		.len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
		.reg_addr = reg_addr,
		.buf = buf,
	};
	struct spi_device *spi = priv->spidev;
	struct spi_transfer *xfers;
	int num_chunks;
	int rc, i = 0;
	u8 *hdr_bufs;

	num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);

	/* One transfer for each message header, one for each message
	 * payload (chunk).
	 */
	xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
			GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	/* Packed buffers for the num_chunks SPI message headers,
	 * stored as a contiguous array
	 */
	hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
			   GFP_KERNEL);
	if (!hdr_bufs) {
		kfree(xfers);
		return -ENOMEM;
	}

	for (i = 0; i < num_chunks; i++) {
		struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
		struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
		u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
		struct spi_transfer *ptp_sts_xfer;
		struct sja1105_spi_message msg;

		/* Populate the transfer's header buffer */
		msg.address = chunk.reg_addr;
		msg.access = rw;
		if (rw == SPI_READ)
			msg.read_count = chunk.len / 4;
		else
			/* Ignored */
			msg.read_count = 0;
		sja1105_spi_message_pack(hdr_buf, &msg);
		hdr_xfer->tx_buf = hdr_buf;
		hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;

		/* Populate the transfer's data buffer */
		if (rw == SPI_READ)
			chunk_xfer->rx_buf = chunk.buf;
		else
			chunk_xfer->tx_buf = chunk.buf;
		chunk_xfer->len = chunk.len;

		/* Request timestamping for the transfer. Instead of letting
		 * callers specify which byte they want to timestamp, we can
		 * make certain assumptions:
		 * - A read operation will request a software timestamp when
		 *   what's being read is the PTP time. That is snapshotted by
		 *   the switch hardware at the end of the command portion
		 *   (hdr_xfer).
		 * - A write operation will request a software timestamp on
		 *   actions that modify the PTP time. Taking clock stepping as
		 *   an example, the switch writes the PTP time at the end of
		 *   the data portion (chunk_xfer).
		 */
		if (rw == SPI_READ)
			ptp_sts_xfer = hdr_xfer;
		else
			ptp_sts_xfer = chunk_xfer;
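		/* Snapshot the system clock around the last word of the
		 * selected transfer (both the pre and post timestamps point
		 * at offset len - 1).
		 */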
		ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
		ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
		ptp_sts_xfer->ptp_sts = ptp_sts;

		/* Calculate next chunk */
		chunk.buf += chunk.len;
		chunk.reg_addr += chunk.len / 4;
		chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
				  SJA1105_SIZE_SPI_MSG_MAXLEN);

		/* De-assert the chip select after each chunk. */
		if (chunk.len)
			chunk_xfer->cs_change = 1;
	}

	rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
	if (rc < 0)
		dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);

	kfree(hdr_bufs);
	kfree(xfers);

	return rc;
}

int sja1105_xfer_buf(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr,
		     u8 *buf, size_t len)
{
	return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *		address reg_addr
 * - SPI_READ:  creates and sends an SPI read message from absolute
 *		address reg_addr
 *
 * The u64 *value is unpacked, meaning that it's stored in the native
 * CPU endianness and directly usable by software running on the core.
 */
int sja1105_xfer_u64(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
		     struct ptp_system_timestamp *ptp_sts)
{
	u8 packed_buf[8];
	int rc;

	if (rw == SPI_WRITE)
		sja1105_pack(packed_buf, value, 63, 0, 8);

	rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);

	if (rw == SPI_READ)
		sja1105_unpack(packed_buf, value, 63, 0, 8);

	return rc;
}

/* Same as above, but transfers only a 4 byte word */
int sja1105_xfer_u32(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
		     struct ptp_system_timestamp *ptp_sts)
{
	u8 packed_buf[4];
	u64 tmp;
	int rc;

	if (rw == SPI_WRITE) {
		/* The packing API only supports u64 as CPU word size,
		 * so we need to convert.
		 */
		tmp = *value;
		sja1105_pack(packed_buf, &tmp, 31, 0, 4);
	}

	rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);

	if (rw == SPI_READ) {
		sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
		*value = tmp;
	}

	return rc;
}

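/* Issue a cold reset request through the reset control register (regs->rgu).
 * The E/T switches encode the request in bit 3, while P/Q/R/S use bit 2
 * (see sja1105pqrs_reset_cmd() below).
 */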
static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
	const int size = SJA1105_SIZE_RESET_CMD;
	u64 cold_rst = 1;

	sja1105_pack(packed_buf, &cold_rst, 3, 3, size);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
				SJA1105_SIZE_RESET_CMD);
}

static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
	const int size = SJA1105_SIZE_RESET_CMD;
	u64 cold_rst = 1;

	sja1105_pack(packed_buf, &cold_rst, 2, 2, size);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
				SJA1105_SIZE_RESET_CMD);
}

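/* Read-modify-write of the port control register: set or clear the inhibit
 * bits for the ports in @port_bitmap, leaving the other ports untouched.
 */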
int sja1105_inhibit_tx(const struct sja1105_private *priv,
		       unsigned long port_bitmap, bool tx_inhibited)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 inhibit_cmd;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
			      &inhibit_cmd, NULL);
	if (rc < 0)
		return rc;

	if (tx_inhibited)
		inhibit_cmd |= port_bitmap;
	else
		inhibit_cmd &= ~port_bitmap;

	return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
				&inhibit_cmd, NULL);
}

struct sja1105_status {
	u64 configs;
	u64 crcchkl;
	u64 ids;
	u64 crcchkg;
};

/* This is not reading the entire General Status area, which is also
 * divergent between E/T and P/Q/R/S, but only the relevant bits for
 * ensuring that the static config upload procedure was successful.
 */
static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
{
	/* So that addition translates to 4 bytes */
	u32 *p = buf;

	/* device_id is missing from the buffer, but we don't
	 * want to diverge from the manual definition of the
	 * register addresses, so we'll back off one step with
	 * the register pointer, and never access p[0].
	 */
	p--;
	sja1105_unpack(p + 0x1, &status->configs,   31, 31, 4);
	sja1105_unpack(p + 0x1, &status->crcchkl,   30, 30, 4);
	sja1105_unpack(p + 0x1, &status->ids,       29, 29, 4);
	sja1105_unpack(p + 0x1, &status->crcchkg,   28, 28, 4);
}

static int sja1105_status_get(struct sja1105_private *priv,
			      struct sja1105_status *status)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[4];
	int rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
	if (rc < 0)
		return rc;

	sja1105_status_unpack(packed_buf, status);

	return 0;
}

/* Not const because unpacking priv->static_config into buffers and preparing
 * for upload requires the recalculation of table CRCs and updating the
 * structures with these.
 */
int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
					 void *config_buf, int buf_len)
{
	struct sja1105_static_config *config = &priv->static_config;
	struct sja1105_table_header final_header;
	sja1105_config_valid_t valid;
	char *final_header_ptr;
	int crc_len;

	valid = sja1105_static_config_check_valid(config);
	if (valid != SJA1105_CONFIG_OK) {
		dev_err(&priv->spidev->dev,
			sja1105_static_config_error_msg[valid]);
		return -EINVAL;
	}

	/* Write Device ID and config tables to config_buf */
	sja1105_static_config_pack(config_buf, config);
	/* Recalculate CRC of the last header (right now 0xDEADBEEF).
	 * Don't include the CRC field itself.
	 */
	crc_len = buf_len - 4;
	/* Read the whole table header */
	final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
	sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
	/* Modify */
	final_header.crc = sja1105_crc32(config_buf, crc_len);
	/* Rewrite */
	sja1105_table_header_packing(final_header_ptr, &final_header, PACK);

	return 0;
}

#define RETRIES 10

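/* Upload sequence: inhibit Tx on all ports, cold-reset the switch into
 * programming mode, write the packed static config at regs->config, then
 * poll the status bits (configs, crcchkl, ids, crcchkg) to confirm the
 * device accepted it. The whole sequence is retried up to RETRIES times.
 */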
int sja1105_static_config_upload(struct sja1105_private *priv)
{
	unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
	struct sja1105_static_config *config = &priv->static_config;
	const struct sja1105_regs *regs = priv->info->regs;
	struct device *dev = &priv->spidev->dev;
	struct sja1105_status status;
	int rc, retries = RETRIES;
	u8 *config_buf;
	int buf_len;

	buf_len = sja1105_static_config_get_length(config);
	config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
	if (!config_buf)
		return -ENOMEM;

	rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
	if (rc < 0) {
		dev_err(dev, "Invalid config, cannot upload\n");
		rc = -EINVAL;
		goto out;
	}
	/* Prevent PHY jabbering during switch reset by inhibiting
	 * Tx on all ports and waiting for current packet to drain.
	 * Otherwise, the PHY will see an unterminated Ethernet packet.
	 */
	rc = sja1105_inhibit_tx(priv, port_bitmap, true);
	if (rc < 0) {
		dev_err(dev, "Failed to inhibit Tx on ports\n");
		rc = -ENXIO;
		goto out;
	}
	/* Wait for an eventual egress packet to finish transmission
	 * (reach IFG). It is guaranteed that a second one will not
	 * follow, and that switch cold reset is thus safe
	 */
	usleep_range(500, 1000);
	do {
		/* Put the SJA1105 in programming mode */
		rc = priv->info->reset_cmd(priv->ds);
		if (rc < 0) {
			dev_err(dev, "Failed to reset switch, retrying...\n");
			continue;
		}
		/* Wait for the switch to come out of reset */
		usleep_range(1000, 5000);
		/* Upload the static config to the device */
		rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
				      config_buf, buf_len);
		if (rc < 0) {
			dev_err(dev, "Failed to upload config, retrying...\n");
			continue;
		}
		/* Check that SJA1105 responded well to the config upload */
		rc = sja1105_status_get(priv, &status);
		if (rc < 0)
			continue;

		if (status.ids == 1) {
			dev_err(dev, "Mismatch between hardware and static config "
				"device id. Wrote 0x%llx, wants 0x%llx\n",
				config->device_id, priv->info->device_id);
			continue;
		}
		if (status.crcchkl == 1) {
			dev_err(dev, "Switch reported invalid local CRC on "
				"the uploaded config, retrying...\n");
			continue;
		}
		if (status.crcchkg == 1) {
			dev_err(dev, "Switch reported invalid global CRC on "
				"the uploaded config, retrying...\n");
			continue;
		}
		if (status.configs == 0) {
			dev_err(dev, "Switch reported that configuration is "
				"invalid, retrying...\n");
			continue;
		}
		/* Success! */
		break;
	} while (--retries);

	if (!retries) {
		rc = -EIO;
		dev_err(dev, "Failed to upload config to device, giving up\n");
		goto out;
	}
	} else if (retries != RETRIES) {
		dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
	}

out:
	kfree(config_buf);
	return rc;
}

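/* Register maps. Per-port arrays are indexed by port number (5 ports per
 * switch); the E/T and P/Q/R/S families differ in several register offsets.
 */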
static struct sja1105_regs sja1105et_regs = {
	.device_id = 0x0,
	.prod_id = 0x100BC3,
	.status = 0x1,
	.port_control = 0x11,
	.vl_status = 0x10000,
	.config = 0x020000,
	.rgu = 0x100440,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	/* UM10944.pdf, Table 78, CGU Register overview */
	.mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
	.mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
	.mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
	.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
	.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
	.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
	.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
	.ptppinst = 0x14,
	.ptppindur = 0x16,
	.ptp_control = 0x17,
	.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
	.ptpclkrate = 0x1A,
	.ptpclkcorp = 0x1D,
};

static struct sja1105_regs sja1105pqrs_regs = {
	.device_id = 0x0,
	.prod_id = 0x100BC3,
	.status = 0x1,
	.port_control = 0x12,
	.vl_status = 0x10000,
	.config = 0x020000,
	.rgu = 0x100440,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
	.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
	.sgmii = 0x1F0000,
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	.ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
	/* UM11040.pdf, Table 114 */
	.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
	.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
	.mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
	.rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
	.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
	.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
	.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
	.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
	.ptppinst = 0x15,
	.ptppindur = 0x17,
	.ptp_control = 0x18,
	.ptpclkval = 0x19,
	.ptpclkrate = 0x1B,
	.ptpclkcorp = 0x1E,
	.ptpsyncts = 0x1F,
};

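/* One sja1105_info descriptor per switch variant. E and T share the E/T
 * register map and dynamic config ops; P, Q, R and S share the P/Q/R/S
 * ones. The P/R and Q/S pairs each share a device ID and are told apart
 * by their part number.
 */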
const struct sja1105_info sja1105e_info = {
	.device_id		= SJA1105E_DEVICE_ID,
	.part_no		= SJA1105ET_PART_NO,
	.static_ops		= sja1105e_table_ops,
	.dyn_ops		= sja1105et_dyn_ops,
	.qinq_tpid		= ETH_P_8021Q,
	.ptp_ts_bits		= 24,
	.ptpegr_ts_bytes	= 4,
	.num_cbs_shapers	= SJA1105ET_MAX_CBS_COUNT,
	.reset_cmd		= sja1105et_reset_cmd,
	.fdb_add_cmd		= sja1105et_fdb_add,
	.fdb_del_cmd		= sja1105et_fdb_del,
	.ptp_cmd_packing	= sja1105et_ptp_cmd_packing,
	.regs			= &sja1105et_regs,
	.name			= "SJA1105E",
};

const struct sja1105_info sja1105t_info = {
	.device_id		= SJA1105T_DEVICE_ID,
	.part_no		= SJA1105ET_PART_NO,
	.static_ops		= sja1105t_table_ops,
	.dyn_ops		= sja1105et_dyn_ops,
	.qinq_tpid		= ETH_P_8021Q,
	.ptp_ts_bits		= 24,
	.ptpegr_ts_bytes	= 4,
	.num_cbs_shapers	= SJA1105ET_MAX_CBS_COUNT,
	.reset_cmd		= sja1105et_reset_cmd,
	.fdb_add_cmd		= sja1105et_fdb_add,
	.fdb_del_cmd		= sja1105et_fdb_del,
	.ptp_cmd_packing	= sja1105et_ptp_cmd_packing,
	.regs			= &sja1105et_regs,
	.name			= "SJA1105T",
};

const struct sja1105_info sja1105p_info = {
	.device_id		= SJA1105PR_DEVICE_ID,
	.part_no		= SJA1105P_PART_NO,
	.static_ops		= sja1105p_table_ops,
	.dyn_ops		= sja1105pqrs_dyn_ops,
	.qinq_tpid		= ETH_P_8021AD,
	.ptp_ts_bits		= 32,
	.ptpegr_ts_bytes	= 8,
	.num_cbs_shapers	= SJA1105PQRS_MAX_CBS_COUNT,
	.setup_rgmii_delay	= sja1105pqrs_setup_rgmii_delay,
	.reset_cmd		= sja1105pqrs_reset_cmd,
	.fdb_add_cmd		= sja1105pqrs_fdb_add,
	.fdb_del_cmd		= sja1105pqrs_fdb_del,
	.ptp_cmd_packing	= sja1105pqrs_ptp_cmd_packing,
	.regs			= &sja1105pqrs_regs,
	.name			= "SJA1105P",
};

const struct sja1105_info sja1105q_info = {
	.device_id		= SJA1105QS_DEVICE_ID,
	.part_no		= SJA1105Q_PART_NO,
	.static_ops		= sja1105q_table_ops,
	.dyn_ops		= sja1105pqrs_dyn_ops,
	.qinq_tpid		= ETH_P_8021AD,
	.ptp_ts_bits		= 32,
	.ptpegr_ts_bytes	= 8,
	.num_cbs_shapers	= SJA1105PQRS_MAX_CBS_COUNT,
	.setup_rgmii_delay	= sja1105pqrs_setup_rgmii_delay,
	.reset_cmd		= sja1105pqrs_reset_cmd,
	.fdb_add_cmd		= sja1105pqrs_fdb_add,
	.fdb_del_cmd		= sja1105pqrs_fdb_del,
	.ptp_cmd_packing	= sja1105pqrs_ptp_cmd_packing,
	.regs			= &sja1105pqrs_regs,
	.name			= "SJA1105Q",
};

const struct sja1105_info sja1105r_info = {
	.device_id		= SJA1105PR_DEVICE_ID,
	.part_no		= SJA1105R_PART_NO,
	.static_ops		= sja1105r_table_ops,
	.dyn_ops		= sja1105pqrs_dyn_ops,
	.qinq_tpid		= ETH_P_8021AD,
	.ptp_ts_bits		= 32,
	.ptpegr_ts_bytes	= 8,
	.num_cbs_shapers	= SJA1105PQRS_MAX_CBS_COUNT,
	.setup_rgmii_delay	= sja1105pqrs_setup_rgmii_delay,
	.reset_cmd		= sja1105pqrs_reset_cmd,
	.fdb_add_cmd		= sja1105pqrs_fdb_add,
	.fdb_del_cmd		= sja1105pqrs_fdb_del,
	.ptp_cmd_packing	= sja1105pqrs_ptp_cmd_packing,
	.regs			= &sja1105pqrs_regs,
	.name			= "SJA1105R",
};

const struct sja1105_info sja1105s_info = {
	.device_id		= SJA1105QS_DEVICE_ID,
	.part_no		= SJA1105S_PART_NO,
	.static_ops		= sja1105s_table_ops,
	.dyn_ops		= sja1105pqrs_dyn_ops,
	.regs			= &sja1105pqrs_regs,
	.qinq_tpid		= ETH_P_8021AD,
	.ptp_ts_bits		= 32,
	.ptpegr_ts_bytes	= 8,
	.num_cbs_shapers	= SJA1105PQRS_MAX_CBS_COUNT,
	.setup_rgmii_delay	= sja1105pqrs_setup_rgmii_delay,
	.reset_cmd		= sja1105pqrs_reset_cmd,
	.fdb_add_cmd		= sja1105pqrs_fdb_add,
	.fdb_del_cmd		= sja1105pqrs_fdb_del,
	.ptp_cmd_packing	= sja1105pqrs_ptp_cmd_packing,
	.name			= "SJA1105S",
};
614