// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <net/dst.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-util.h"

static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
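
/*
 * Print a message for each SRX (SPI4 receive) error bit that is set in
 * the SPX interrupt register for the given interface.
 */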
static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
{
	if (spx_int_reg.s.spf)
		pr_err("SPI%d: SRX Spi4 interface down\n", index);
	if (spx_int_reg.s.calerr)
		pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
	if (spx_int_reg.s.syncerr)
		pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
		       index);
	if (spx_int_reg.s.diperr)
		pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
	if (spx_int_reg.s.tpaovr)
		pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
		       index);
	if (spx_int_reg.s.rsverr)
		pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
		       index);
	if (spx_int_reg.s.drwnng)
		pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
		       index);
	if (spx_int_reg.s.clserr)
		pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
		       index);
	if (spx_int_reg.s.spiovr)
		pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
	if (spx_int_reg.s.abnorm)
		pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
		       index);
	if (spx_int_reg.s.prtnxa)
		pr_err("SPI%d: SRX Port out of range\n", index);
}
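
/*
 * Print a message for each STX (SPI4 transmit) error bit that is set in
 * the STX interrupt register for the given interface.
 */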
static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
{
	if (stx_int_reg.s.syncerr)
		pr_err("SPI%d: STX Interface encountered a fatal error\n",
		       index);
	if (stx_int_reg.s.frmerr)
		pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
		       index);
	if (stx_int_reg.s.unxfrm)
		pr_err("SPI%d: STX Unexpected framing sequence\n", index);
	if (stx_int_reg.s.nosync)
		pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
		       index);
	if (stx_int_reg.s.diperr)
		pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
		       index);
	if (stx_int_reg.s.datovr)
		pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
	if (stx_int_reg.s.ovrbst)
		pr_err("SPI%d: STX Transmit packet burst too big\n", index);
	if (stx_int_reg.s.calpar1)
		pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
		       index, 1);
	if (stx_int_reg.s.calpar0)
		pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
		       index, 0);
}
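
/*
 * Handle an error interrupt on one SPI interface: acknowledge and report
 * the pending SPX/STX errors, then mask further error interrupts and mark
 * the interface as needing retraining.
 */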
static irqreturn_t cvm_oct_spi_spx_int(int index)
{
	union cvmx_spxx_int_reg spx_int_reg;
	union cvmx_stxx_int_reg stx_int_reg;

	spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
	cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
	if (!need_retrain[index]) {
		spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
		cvm_oct_spxx_int_pr(spx_int_reg, index);
	}

	stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
	cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
	if (!need_retrain[index]) {
		stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
		cvm_oct_stxx_int_pr(stx_int_reg, index);
	}

	cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
	cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
	need_retrain[index] = 1;

	return IRQ_HANDLED;
}
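
/*
 * Shared RML interrupt handler. Checks which SPI interface raised the
 * interrupt and dispatches it to cvm_oct_spi_spx_int().
 */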
static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
{
	irqreturn_t return_status = IRQ_NONE;
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;

	/* Check to see if this interrupt was caused by the SPX/STX blocks */
	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
	if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
		return_status = cvm_oct_spi_spx_int(1);

	if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
		return_status = cvm_oct_spi_spx_int(0);

	return return_status;
}
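
/*
 * Unmask all SPX and STX error interrupts for an interface so that error
 * conditions are reported again.
 */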
static void cvm_oct_spi_enable_error_reporting(int interface)
{
	union cvmx_spxx_int_msk spxx_int_msk;
	union cvmx_stxx_int_msk stxx_int_msk;

	spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
	spxx_int_msk.s.calerr = 1;
	spxx_int_msk.s.syncerr = 1;
	spxx_int_msk.s.diperr = 1;
	spxx_int_msk.s.tpaovr = 1;
	spxx_int_msk.s.rsverr = 1;
	spxx_int_msk.s.drwnng = 1;
	spxx_int_msk.s.clserr = 1;
	spxx_int_msk.s.spiovr = 1;
	spxx_int_msk.s.abnorm = 1;
	spxx_int_msk.s.prtnxa = 1;
	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);

	stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
	stxx_int_msk.s.frmerr = 1;
	stxx_int_msk.s.unxfrm = 1;
	stxx_int_msk.s.nosync = 1;
	stxx_int_msk.s.diperr = 1;
	stxx_int_msk.s.datovr = 1;
	stxx_int_msk.s.ovrbst = 1;
	stxx_int_msk.s.calpar1 = 1;
	stxx_int_msk.s.calpar0 = 1;
	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
}
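
/*
 * Poll callback for SPI ports: restart an interface that needs retraining
 * and check the SPI4000 link speed for a single port per call.
 */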
static void cvm_oct_spi_poll(struct net_device *dev)
{
	static int spi4000_port;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface;

	for (interface = 0; interface < 2; interface++) {
		if ((priv->port == interface * 16) && need_retrain[interface]) {
			if (cvmx_spi_restart_interface
			    (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
				need_retrain[interface] = 0;
				cvm_oct_spi_enable_error_reporting(interface);
			}
		}

		/*
		 * The SPI4000 TWSI interface is very slow. In order
		 * not to bring the system to a crawl, we only poll a
		 * single port every second. This means negotiation
		 * speed changes take up to 10 seconds, but at least
		 * we don't waste absurd amounts of time waiting for
		 * TWSI.
		 */
		if (priv->port == spi4000_port) {
			/*
			 * This function does nothing if it is called on an
			 * interface without a SPI4000.
			 */
			cvmx_spi4000_check_speed(interface, priv->port);
			/*
			 * Normal ordering would increment. By decrementing
			 * we only match once per iteration.
			 */
			spi4000_port--;
			if (spi4000_port < 0)
				spi4000_port = 10;
		}
	}
}
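
/*
 * Per-device initialization for SPI ports: register the shared RML
 * interrupt on first use, and enable error reporting plus the poll
 * callback for the first port of each interface.
 */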
int cvm_oct_spi_init(struct net_device *dev)
{
	int r;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (number_spi_ports == 0) {
		r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
				IRQF_SHARED, "SPI", &number_spi_ports);
		if (r)
			return r;
	}
	number_spi_ports++;

	if ((priv->port == 0) || (priv->port == 16)) {
		cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
		priv->poll = cvm_oct_spi_poll;
	}
	cvm_oct_common_init(dev);
	return 0;
}
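
/*
 * Per-device teardown: undo the common initialization and, when the last
 * SPI port goes away, mask the error interrupts and free the RML IRQ.
 */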
void cvm_oct_spi_uninit(struct net_device *dev)
{
	int interface;

	cvm_oct_common_uninit(dev);
	number_spi_ports--;
	if (number_spi_ports == 0) {
		for (interface = 0; interface < 2; interface++) {
			cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
			cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
		}
		free_irq(OCTEON_IRQ_RML, &number_spi_ports);
	}
}