/** @file
 * Support for PCIe Marvell Yukon gigabit ethernet adapter product family
 *
 * Copyright (c) 2011-2016, ARM Limited. All rights reserved.
 *
 * This program and the accompanying materials
 * are licensed and made available under the terms and conditions of the BSD License
 * which accompanies this distribution. The full text of the license may be found at
 * http://opensource.org/licenses/bsd-license.php
 *
 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
 *
 **/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *   Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <Base.h>
#include <Library/IoLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/NetLib.h>
#include <Library/PcdLib.h>
#include <Library/BaseLib.h>
#include <Library/TimerLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/PciIo.h>
#include <IndustryStandard/Pci.h>
#include <IndustryStandard/Acpi.h>
#include "miivar.h"
#include "if_media.h"
#include "if_mskreg.h"
#include "if_msk.h"

#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static struct msk_product {
  UINT16      msk_vendorid;
  UINT16      msk_deviceid;
  const CHAR8 *msk_name;
} msk_products[] = {
  { VENDORID_SK, DEVICEID_SK_YUKON2, "SK-9Sxx Gigabit Ethernet" },
  { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, "SK-9Exx Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, "Marvell Yukon 88E8021CU Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8021X, "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, "Marvell Yukon 88E8022CU Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8022X, "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, "Marvell Yukon 88E8061CU Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8061X, "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, "Marvell Yukon 88E8062CU Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8062X, "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8035, "Marvell Yukon 88E8035 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8036, "Marvell Yukon 88E8036 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8038, "Marvell Yukon 88E8038 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8039, "Marvell Yukon 88E8039 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8040, "Marvell Yukon 88E8040 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8040T, "Marvell Yukon 88E8040T Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8042, "Marvell Yukon 88E8042 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_8048, "Marvell Yukon 88E8048 Fast Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4361, "Marvell Yukon 88E8050 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4360, "Marvell Yukon 88E8052 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4362, "Marvell Yukon 88E8053 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4363, "Marvell Yukon 88E8055 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4364, "Marvell Yukon 88E8056 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4365, "Marvell Yukon 88E8070 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_436A, "Marvell Yukon 88E8058 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_436B, "Marvell Yukon 88E8071 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_436C, "Marvell Yukon 88E8072 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4380, "Marvell Yukon 88E8057 Gigabit Ethernet" },
  { VENDORID_MARVELL, DEVICEID_MRVL_4381, "Marvell Yukon 88E8059 Gigabit Ethernet" },
  { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, "D-Link 550SX Gigabit Ethernet" },
  { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX, "D-Link 560SX Gigabit Ethernet" },
  { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, "D-Link 560T Gigabit Ethernet" }
};

#ifndef MDEPKG_NDEBUG
static const CHAR8 *model_name[] = {
  "Yukon XL",
  "Yukon EC Ultra",
  "Yukon EX",
  "Yukon EC",
  "Yukon FE",
  "Yukon FE+",
  "Yukon Supreme",
  "Yukon Ultra 2",
  "Yukon Unknown",
  "Yukon Optima",
};
#endif

//
// Forward declarations
//
STATIC VOID mskc_setup_rambuffer (struct msk_softc *);
STATIC VOID mskc_reset (struct msk_softc *);

EFI_STATUS mskc_attach_if (struct msk_if_softc *, UINTN);
VOID mskc_detach_if (struct msk_if_softc *);

static VOID mskc_tick (IN EFI_EVENT, IN VOID*);
STATIC VOID msk_intr (struct msk_softc *);
static VOID msk_intr_phy (struct msk_if_softc *);
static VOID msk_intr_gmac (struct msk_if_softc *);
static __inline VOID msk_rxput (struct msk_if_softc *);
STATIC INTN msk_handle_events (struct msk_softc *);
static VOID msk_handle_hwerr (struct msk_if_softc *, UINT32);
STATIC VOID msk_intr_hwerr (struct msk_softc *);
static VOID msk_rxeof (struct msk_if_softc *, UINT32, UINT32, INTN);
static VOID msk_txeof (struct msk_if_softc *, INTN);
static EFI_STATUS msk_encap (struct msk_if_softc *, MSK_SYSTEM_BUF *);
STATIC VOID msk_start (struct msk_if_softc *);
STATIC VOID msk_set_prefetch (struct msk_if_softc *, INTN, EFI_PHYSICAL_ADDRESS, UINT32);
static VOID msk_set_rambuffer (struct msk_if_softc *);
static VOID msk_set_tx_stfwd (struct msk_if_softc *);
static EFI_STATUS msk_init (struct msk_if_softc *);
VOID mskc_stop_if (struct msk_if_softc *);
static VOID msk_phy_power (struct msk_softc *, INTN);
INTN msk_phy_readreg (struct msk_if_softc *, INTN);
INTN msk_phy_writereg (struct msk_if_softc *, INTN, INTN);
STATIC EFI_STATUS msk_status_dma_alloc (struct msk_softc *);
STATIC VOID msk_status_dma_free (struct msk_softc *);
static EFI_STATUS msk_txrx_dma_alloc (struct msk_if_softc *);
static VOID msk_txrx_dma_free (struct msk_if_softc *);
static EFI_STATUS msk_init_rx_ring (struct msk_if_softc *);
static VOID msk_init_tx_ring (struct msk_if_softc *);
static __inline VOID msk_discard_rxbuf (struct msk_if_softc *, INTN);
static EFI_STATUS msk_newbuf (struct msk_if_softc *, INTN);

static VOID msk_rxfilter (
  struct msk_if_softc *sc_if,
  UINT32 FilterFlags,
  UINTN MCastFilterCnt,
  EFI_MAC_ADDRESS *MCastFilter
  );
static VOID msk_setvlan (struct msk_if_softc *);

static VOID msk_stats_clear (struct msk_if_softc *);
static VOID msk_stats_update (struct msk_if_softc *);
STATIC VOID clear_pci_errors (struct msk_softc *);

EFI_STATUS e1000_probe_and_attach (struct mii_data *, const struct msk_mii_data *, VOID *, VOID **);
VOID e1000phy_tick (VOID *);
VOID e1000phy_mediachg (VOID *);
EFI_STATUS e1000phy_detach (VOID *);

//
// Functions
//

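/**
  Read a PHY register over the GMAC Serial Management Interface (SMI).
  Issues a read command, then polls (up to MSK_TIMEOUT iterations, one
  microsecond apart) for the read-valid bit before fetching the data.

  @param[in]  sc_if  Interface control block.
  @param[in]  reg    PHY register index to read.

  @return  The register value, or 0 if the PHY never became ready.
**/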
INTN
msk_phy_readreg (
  struct msk_if_softc *sc_if,
  INTN reg
  )
{
  INTN i;
  INTN val;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  GMAC_WRITE_2 (sc, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD (PHY_ADDR_MARV) | GM_SMI_CT_REG_AD (reg) | GM_SMI_CT_OP_RD);

  for (i = 0; i < MSK_TIMEOUT; i++) {
    gBS->Stall (1);
    val = GMAC_READ_2 (sc, port, GM_SMI_CTRL);
    if ((val & GM_SMI_CT_RD_VAL) != 0) {
      val = GMAC_READ_2 (sc, port, GM_SMI_DATA);
      break;
    }
  }

  if (i == MSK_TIMEOUT) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: phy failed to come ready\n"));
    val = 0;
  }

  return (val);
}

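/**
  Write a PHY register over the GMAC SMI, then poll (up to MSK_TIMEOUT
  iterations) until the interface reports it is no longer busy.

  @param[in]  sc_if  Interface control block.
  @param[in]  reg    PHY register index to write.
  @param[in]  val    Value to write.

  @return  Always 0; a timeout is only logged.
**/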
INTN
msk_phy_writereg (
  struct msk_if_softc *sc_if,
  INTN reg,
  INTN val
  )
{
  INTN i;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  GMAC_WRITE_2 (sc, port, GM_SMI_DATA, val);
  GMAC_WRITE_2 (sc, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD (PHY_ADDR_MARV) | GM_SMI_CT_REG_AD (reg));
  for (i = 0; i < MSK_TIMEOUT; i++) {
    gBS->Stall (1);
    if ((GMAC_READ_2 (sc, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0) {
      break;
    }
  }
  if (i == MSK_TIMEOUT) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: phy write timeout\n"));
  }

  return (0);
}

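/**
  React to a link state change reported by the PHY: derive the link-up
  flag from the active media, then either program GMAC speed, duplex
  and flow control to match, or disable the MAC and PHY interrupts on
  link loss.

  @param[in]  sc_if  Interface control block.
**/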
VOID
msk_miibus_statchg (
  struct msk_if_softc *sc_if
  )
{
  struct mii_data *mii;
  UINT32 gmac;
  UINTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;
  mii = &sc_if->mii_d;
  sc_if->msk_flags &= ~MSK_FLAG_LINK;

  if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE)) {

    DEBUG ((EFI_D_NET, "Marvell Yukon: msk_miibus_statchg, phy is active\n"));
    switch (IFM_SUBTYPE (mii->mii_media_active)) {
      case IFM_10_T:
      case IFM_100_TX:
        sc_if->msk_flags |= MSK_FLAG_LINK;
        break;
      case IFM_1000_T:
      case IFM_1000_SX:
      case IFM_1000_LX:
      case IFM_1000_CX:
        if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0) {
          sc_if->msk_flags |= MSK_FLAG_LINK;
        }
        break;
      default:
        break;
    }
  }

  if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
    // Enable Tx FIFO Underrun
    DEBUG ((EFI_D_NET, "Marvell Yukon: msk_miibus_statchg, link up\n"));

    CSR_WRITE_1 (sc, MR_ADDR (port, GMAC_IRQ_MSK), GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
    //
    // Because mii(4) notifies msk(4) when it detects a link status
    // change, there is no need to enable automatic
    // speed/flow-control/duplex updates.
    //
    gmac = GM_GPCR_AU_ALL_DIS;
    switch (IFM_SUBTYPE (mii->mii_media_active)) {
      case IFM_1000_SX:
      case IFM_1000_T:
        gmac |= GM_GPCR_SPEED_1000;
        break;
      case IFM_100_TX:
        gmac |= GM_GPCR_SPEED_100;
        break;
      case IFM_10_T:
        break;
    }

    // Disable Rx flow control
    if ((IFM_OPTIONS (mii->mii_media_active) & IFM_FLAG0) == 0) {
      gmac |= GM_GPCR_FC_RX_DIS;
    }
    // Disable Tx flow control
    if ((IFM_OPTIONS (mii->mii_media_active) & IFM_FLAG1) == 0) {
      gmac |= GM_GPCR_FC_TX_DIS;
    }
    if ((IFM_OPTIONS (mii->mii_media_active) & IFM_FDX) != 0) {
      gmac |= GM_GPCR_DUP_FULL;
    } else {
      gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
    }
    gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
    GMAC_WRITE_2 (sc, port, GM_GP_CTRL, gmac);
    // Read back to ensure the write completed
    GMAC_READ_2 (sc, port, GM_GP_CTRL);
    gmac = GMC_PAUSE_OFF;
    if ((IFM_OPTIONS (mii->mii_media_active) & IFM_FDX) != 0) {
      if ((IFM_OPTIONS (mii->mii_media_active) & IFM_FLAG0) != 0) {
        gmac = GMC_PAUSE_ON;
      }
    }
    CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), gmac);

    // Enable PHY interrupt for FIFO underrun/overflow
    msk_phy_writereg (sc_if, PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
  } else {
    //
    // Link state changed to down.
    // Disable PHY interrupts.
    //
    DEBUG ((EFI_D_NET, "Marvell Yukon: msk_miibus_statchg, link down\n"));
    msk_phy_writereg (sc_if, PHY_MARV_INT_MASK, 0);
    // Disable Rx/Tx MAC
    gmac = GMAC_READ_2 (sc, port, GM_GP_CTRL);
    if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
      gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
      GMAC_WRITE_2 (sc, port, GM_GP_CTRL, gmac);
      // Read back to ensure the write completed
      GMAC_READ_2 (sc, port, GM_GP_CTRL);
    }
  }
}

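/**
  Compute the big-endian Ethernet CRC32 of a buffer, bit-serially, as
  used below to index the GMAC multicast hash filter.

  @param[in]  buf  Data to checksum.
  @param[in]  len  Number of bytes in buf.

  @return  The CRC32 value.
**/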
UINT32
ether_crc32_be (
  const UINT8 *buf,
  UINTN len
  )
{
  UINTN i;
  UINT32 crc;
  UINT32 carry;
  INTN bit;
  UINT8 data;

  crc = 0xffffffff; // initial value

  for (i = 0; i < len; i++) {
    for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
      carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
      crc <<= 1;
      if (carry) {
        crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
      }
    }
  }

  return crc;
}

VOID
mskc_rxfilter (
  struct msk_if_softc *sc_if,
  UINT32 FilterFlags,
  UINTN MCastFilterCnt,
  EFI_MAC_ADDRESS *MCastFilter
  )
{
  msk_rxfilter (sc_if, FilterFlags, MCastFilterCnt, MCastFilter);
}

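/**
  Program the GMAC receive filter. Promiscuous mode disables the
  unicast and multicast filters entirely; promiscuous-multicast opens
  the multicast hash filter; otherwise each EFI_MAC_ADDRESS in
  MCastFilter is hashed (low six bits of its CRC32) into the hash
  table registers.

  @param[in]  sc_if           Interface control block.
  @param[in]  FilterFlags     EFI_SIMPLE_NETWORK_RECEIVE_* flags.
  @param[in]  MCastFilterCnt  Number of entries in MCastFilter.
  @param[in]  MCastFilter     Multicast addresses to admit.
**/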
static VOID
msk_rxfilter (
  struct msk_if_softc *sc_if,
  UINT32 FilterFlags,
  UINTN MCastFilterCnt,
  EFI_MAC_ADDRESS *MCastFilter
  )
{
  UINT32 mchash[2];
  UINT32 crc;
  UINT16 mode;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  gBS->SetMem (mchash, sizeof (mchash), 0);
  mode = GMAC_READ_2 (sc, port, GM_RX_CTRL);
  if ((FilterFlags & EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS) != 0) {
    mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
  } else if ((FilterFlags & EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS_MULTICAST) != 0) {
    mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
    mchash[0] = 0xffff;
    mchash[1] = 0xffff;
  } else {
    mode |= GM_RXCR_UCF_ENA;
    while (MCastFilterCnt-- > 0) {
      crc = ether_crc32_be (MCastFilter[MCastFilterCnt].Addr, NET_ETHER_ADDR_LEN);
      /* Just want the 6 least significant bits. */
      crc &= 0x3f;
      /* Set the corresponding bit in the hash table. */
      mchash[crc >> 5] |= 1 << (crc & 0x1f);
    }
    if (mchash[0] != 0 || mchash[1] != 0) {
      mode |= GM_RXCR_MCF_ENA;
    }
  }

  GMAC_WRITE_2 (sc, port, GM_MC_ADDR_H1, mchash[0] & 0xffff);
  GMAC_WRITE_2 (sc, port, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff);
  GMAC_WRITE_2 (sc, port, GM_MC_ADDR_H3, mchash[1] & 0xffff);
  GMAC_WRITE_2 (sc, port, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff);
  GMAC_WRITE_2 (sc, port, GM_RX_CTRL, mode);
}

static
VOID
msk_setvlan (
  struct msk_if_softc *sc_if
  )
{
  //
  // Disable automatic VLAN tagging/stripping
  //
  CSR_WRITE_4 (sc_if->msk_softc, MR_ADDR (sc_if->msk_md.port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
  CSR_WRITE_4 (sc_if->msk_softc, MR_ADDR (sc_if->msk_md.port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
}

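/**
  Initialise the receive descriptor ring: clear every descriptor,
  attach a freshly allocated and DMA-mapped buffer to each slot via
  msk_newbuf, then hand the producer index to the prefetch unit.

  @param[in]  sc_if  Interface control block.

  @retval EFI_SUCCESS  The ring is ready for reception.
  @return  Errors from buffer allocation or mapping are passed through.
**/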
static
EFI_STATUS
msk_init_rx_ring (
  struct msk_if_softc *sc_if
  )
{
  struct msk_ring_data *rd;
  struct msk_rxdesc *rxd;
  INTN i;
  INTN prod;
  INTN nbuf;
  EFI_STATUS Status;

  sc_if->msk_cdata.msk_rx_cons = 0;
  sc_if->msk_cdata.msk_rx_prod = 0;
  sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

  rd = &sc_if->msk_rdata;
  gBS->SetMem (rd->msk_rx_ring, MSK_RX_RING_SZ, 0);
  for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
    rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
    gBS->SetMem (&rxd->rx_m, sizeof (MSK_DMA_BUF), 0);
    rxd->rx_le = &rd->msk_rx_ring[prod];
    MSK_INC (prod, MSK_RX_RING_CNT);
  }
  nbuf = MSK_RX_BUF_CNT;
  prod = 0;

  for (i = 0; i < nbuf; i++) {
    Status = msk_newbuf (sc_if, prod);
    if (EFI_ERROR (Status)) {
      return Status;
    }
    MSK_RX_INC (prod, MSK_RX_RING_CNT);
  }

  // Update prefetch unit.
  sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
  CSR_WRITE_2 (sc_if->msk_softc, Y2_PREF_Q_ADDR (sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);

  return EFI_SUCCESS;
}

STATIC
VOID
msk_init_tx_ring (
  struct msk_if_softc *sc_if
  )
{
  struct msk_ring_data *rd;
  struct msk_txdesc *txd;
  INTN i;

  sc_if->msk_cdata.msk_tx_prod = 0;
  sc_if->msk_cdata.msk_tx_cons = 0;
  sc_if->msk_cdata.msk_tx_cnt = 0;
  sc_if->msk_cdata.msk_tx_high_addr = 0;

  rd = &sc_if->msk_rdata;
  gBS->SetMem (rd->msk_tx_ring, sizeof (struct msk_tx_desc) * MSK_TX_RING_CNT, 0);
  for (i = 0; i < MSK_TX_RING_CNT; i++) {
    txd = &sc_if->msk_cdata.msk_txdesc[i];
    gBS->SetMem (&(txd->tx_m), sizeof (MSK_DMA_BUF), 0);
    txd->tx_le = &rd->msk_tx_ring[i];
  }
}

static
__inline
VOID
msk_discard_rxbuf (
  struct msk_if_softc *sc_if,
  INTN idx
  )
{
  struct msk_rx_desc *rx_le;
  struct msk_rxdesc *rxd;
  MSK_DMA_BUF *DmaBuffer;

  DEBUG ((EFI_D_NET, "Marvell Yukon: discard rxbuf\n"));

#ifdef MSK_64BIT_DMA
  rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  rx_le = rxd->rx_le;
  rx_le->msk_control = htole32 (OP_ADDR64 | HW_OWNER);
  MSK_INC (idx, MSK_RX_RING_CNT);
#endif

  rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  DmaBuffer = &rxd->rx_m;
  rx_le = rxd->rx_le;
  rx_le->msk_control = htole32 (DmaBuffer->Length | OP_PACKET | HW_OWNER);
}

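/**
  Allocate and zero a receive buffer of MAX_SUPPORTED_PACKET_SIZE
  bytes, map it for bus-master writes, and hand the descriptor at idx
  to the hardware.

  @param[in]  sc_if  Interface control block.
  @param[in]  idx    Rx ring slot to fill.

  @retval EFI_SUCCESS  The descriptor now owns a mapped buffer.
  @return  Errors from AllocatePool or PciIo->Map are passed through.
**/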
static
EFI_STATUS
msk_newbuf (
  IN struct msk_if_softc *sc_if,
  IN INTN idx
  )
{
  struct msk_rx_desc *rx_le;
  struct msk_rxdesc *rxd;
  UINTN Length;
  VOID *Buffer;
  VOID *Mapping;
  EFI_PHYSICAL_ADDRESS PhysAddr;
  EFI_PCI_IO_PROTOCOL *PciIo;
  EFI_STATUS Status;

  PciIo = sc_if->msk_softc->PciIo;
  Length = MAX_SUPPORTED_PACKET_SIZE;

  rxd = &sc_if->msk_cdata.msk_rxdesc[idx];

  Status = gBS->AllocatePool (EfiBootServicesData, Length, &Buffer);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  gBS->SetMem (Buffer, Length, 0);

  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterWrite, Buffer, &Length, &PhysAddr, &Mapping);
  if (EFI_ERROR (Status)) {
    gBS->FreePool (Buffer);
    return Status;
  }

#ifdef MSK_64BIT_DMA
  rx_le = rxd->rx_le;
  rx_le->msk_addr = htole32 (MSK_ADDR_HI (PhysAddr));
  rx_le->msk_control = htole32 (OP_ADDR64 | HW_OWNER);
  MSK_INC (idx, MSK_RX_RING_CNT);
  rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif

  rxd->rx_m.DmaMapping = Mapping;
  rxd->rx_m.Buf = Buffer;
  rxd->rx_m.Length = Length;
  rx_le = rxd->rx_le;
  rx_le->msk_addr = htole32 (MSK_ADDR_LO (PhysAddr));
  rx_le->msk_control = htole32 (Length | OP_PACKET | HW_OWNER);

  return EFI_SUCCESS;
}

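/**
  Check whether the PCI device is one of the adapters supported by
  this driver by matching its vendor/device ID pair against the
  msk_products table.

  @param[in]  PciIo  PCI I/O protocol for the candidate device.

  @retval EFI_SUCCESS      The device is supported.
  @retval EFI_UNSUPPORTED  The device is not recognised.
**/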
EFI_STATUS
mskc_probe (
  EFI_PCI_IO_PROTOCOL *PciIo
  )
{
  struct msk_product *mp;
  UINT16 vendor;
  UINT16 devid;
  UINT32 PciID;
  INTN i;
  EFI_STATUS Status;

  Status = PciIo->Pci.Read (
                   PciIo,
                   EfiPciIoWidthUint32,
                   PCI_VENDOR_ID_OFFSET,
                   1,
                   &PciID
                   );
  if (EFI_ERROR (Status)) {
    return EFI_UNSUPPORTED;
  }

  vendor = PciID & 0xFFFF;
  devid = PciID >> 16;
  mp = msk_products;
  for (i = 0; i < sizeof (msk_products) / sizeof (msk_products[0]); i++, mp++) {
    if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
      DEBUG ((EFI_D_NET, "Marvell Yukon: Probe found device %a\n", mp->msk_name));
      return EFI_SUCCESS;
    }
  }
  return EFI_UNSUPPORTED;
}

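/**
  Size and partition the adapter's internal SRAM: give the receiver
  two thirds of the space (rounded down to a 1 KB multiple, as the
  Yukon II requires) and the transmitter the remainder, per port.

  @param[in]  sc  Controller state.
**/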
static
VOID
mskc_setup_rambuffer (
  struct msk_softc *sc
  )
{
  INTN next;
  INTN i;

  /* Get adapter SRAM size. */
  sc->msk_ramsize = CSR_READ_1 (sc, B2_E_0) * 4;
  DEBUG ((DEBUG_NET, "Marvell Yukon: RAM buffer size : %dKB\n", sc->msk_ramsize));
  if (sc->msk_ramsize == 0) {
    return;
  }

  sc->msk_pflags |= MSK_FLAG_RAMBUF;
  /*
   * Give the receiver 2/3 of the memory and round down to a multiple
   * of 1024. The Tx/Rx RAM buffer size of the Yukon II should be a
   * multiple of 1024.
   */
  sc->msk_rxqsize = (((sc->msk_ramsize * 1024 * 2) / 3) / 1024) * 1024;
  sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
  for (i = 0, next = 0; i < sc->msk_num_port; i++) {
    sc->msk_rxqstart[i] = next;
    sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
    next = sc->msk_rxqend[i] + 1;
    sc->msk_txqstart[i] = next;
    sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
    next = sc->msk_txqend[i] + 1;
    DEBUG ((EFI_D_NET, "Marvell Yukon: Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
            sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], sc->msk_rxqend[i]));
    DEBUG ((EFI_D_NET, "Marvell Yukon: Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
            sc->msk_txqsize / 1024, sc->msk_txqstart[i], sc->msk_txqend[i]));
  }
}

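/**
  Switch PHY/clock power for the whole controller. MSK_PHY_POWERUP
  selects VCC, ungates the PCI and core clocks, releases the PHYs
  from power-down/COMA and applies chip-specific workarounds;
  MSK_PHY_POWERDOWN reverses the sequence and parks power on VAUX.

  @param[in]  sc    Controller state.
  @param[in]  mode  MSK_PHY_POWERUP or MSK_PHY_POWERDOWN.
**/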
static
VOID
msk_phy_power (
  struct msk_softc *sc,
  INTN mode
  )
{
  UINT32 our;
  UINT32 val;
  INTN i;

  switch (mode) {
    case MSK_PHY_POWERUP:
      // Switch power to VCC (WA for VAUX problem)
      CSR_WRITE_1 (sc, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

      // Disable Core Clock Division, set Clock Select to 0
      CSR_WRITE_4 (sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

      val = 0;
      if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
        // Enable bits are inverted
        val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
              Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
              Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
      }
      //
      // Enable PCI & Core Clock, enable clock gating for both Links.
      //
      CSR_WRITE_1 (sc, B2_Y2_CLK_GATE, val);

      val = CSR_PCI_READ_4 (sc, PCI_OUR_REG_1);
      val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
      if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
        if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
          // Deassert Low Power for 1st PHY
          val |= PCI_Y2_PHY1_COMA;
          if (sc->msk_num_port > 1) {
            val |= PCI_Y2_PHY2_COMA;
          }
        }
      }
      // Release PHY from PowerDown/COMA mode
      CSR_PCI_WRITE_4 (sc, PCI_OUR_REG_1, val);

      switch (sc->msk_hw_id) {
        case CHIP_ID_YUKON_EC_U:
        case CHIP_ID_YUKON_EX:
        case CHIP_ID_YUKON_FE_P:
        case CHIP_ID_YUKON_UL_2:
        case CHIP_ID_YUKON_OPT:
          CSR_WRITE_2 (sc, B0_CTST, Y2_HW_WOL_OFF);

          // Enable all clocks
          CSR_PCI_WRITE_4 (sc, PCI_OUR_REG_3, 0);
          our = CSR_PCI_READ_4 (sc, PCI_OUR_REG_4);
          our &= (PCI_FORCE_ASPM_REQUEST | PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY | PCI_ASPM_CLKRUN_REQUEST);
          // Set all bits to 0 except bits 15..12
          CSR_PCI_WRITE_4 (sc, PCI_OUR_REG_4, our);
          our = CSR_PCI_READ_4 (sc, PCI_OUR_REG_5);
          our &= PCI_CTL_TIM_VMAIN_AV_MSK;
          CSR_PCI_WRITE_4 (sc, PCI_OUR_REG_5, our);
          CSR_PCI_WRITE_4 (sc, PCI_CFG_REG_1, 0);
          //
          // Disable status race, workaround for
          // Yukon EC Ultra & Yukon EX.
          //
          val = CSR_READ_4 (sc, B2_GP_IO);
          val |= GLB_GPIO_STAT_RACE_DIS;
          CSR_WRITE_4 (sc, B2_GP_IO, val);
          CSR_READ_4 (sc, B2_GP_IO);
          break;
        default:
          break;
      }
      for (i = 0; i < sc->msk_num_port; i++) {
        CSR_WRITE_2 (sc, MR_ADDR (i, GMAC_LINK_CTRL), GMLC_RST_SET);
        CSR_WRITE_2 (sc, MR_ADDR (i, GMAC_LINK_CTRL), GMLC_RST_CLR);
      }
      break;
    case MSK_PHY_POWERDOWN:
      val = CSR_PCI_READ_4 (sc, PCI_OUR_REG_1);
      val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
      if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
        val &= ~PCI_Y2_PHY1_COMA;
        if (sc->msk_num_port > 1) {
          val &= ~PCI_Y2_PHY2_COMA;
        }
      }
      CSR_PCI_WRITE_4 (sc, PCI_OUR_REG_1, val);

      val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
      if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
        // Enable bits are inverted
        val = 0;
      }
      //
      // Disable PCI & Core Clock, disable clock gating for
      // both Links.
      //
      CSR_WRITE_1 (sc, B2_Y2_CLK_GATE, val);
      CSR_WRITE_1 (sc, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
      break;
    default:
      break;
  }
}

static
VOID
clear_pci_errors (
  struct msk_softc *sc
  )
{
  EFI_STATUS Status;
  UINT16 val;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc->PciIo;

  // Clear all error bits in the PCI status register.
  Status = PciIo->Pci.Read (
                   PciIo,
                   EfiPciIoWidthUint16,
                   PCI_PRIMARY_STATUS_OFFSET,
                   1,
                   &val
                   );
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Reading PCI Status failed: %r", Status));
  }
  CSR_WRITE_1 (sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  val |= PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
         PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT;
  Status = PciIo->Pci.Write (
                   PciIo,
                   EfiPciIoWidthUint16,
                   PCI_PRIMARY_STATUS_OFFSET,
                   1,
                   &val
                   );
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Writing PCI Status failed: %r", Status));
  }
  CSR_WRITE_2 (sc, B0_CTST, CS_MRST_CLR);
}

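/**
  Bring the controller to a known state: disable ASF, soft-reset the
  chip, clear PCI/PCIe error state, power up the PHYs, reset both
  GMACs, program the RAM interface timeouts, and re-initialise the
  status unit and its timers.

  @param[in]  sc  Controller state.
**/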
static
VOID
mskc_reset (
  struct msk_softc *sc
  )
{
  EFI_STATUS Status;
  EFI_PHYSICAL_ADDRESS PhysAddr;
  UINT16 status;
  UINT32 val;
  INTN i;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc->PciIo;

  CSR_WRITE_2 (sc, B0_CTST, CS_RST_CLR);

  // Disable ASF
  if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
    status = CSR_READ_2 (sc, B28_Y2_ASF_HCU_CCSR);
    // Clear AHB bridge & microcontroller reset
    status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | Y2_ASF_HCU_CCSR_CPU_RST_MODE);
    // Clear ASF microcontroller state
    status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
    CSR_WRITE_2 (sc, B28_Y2_ASF_HCU_CCSR, status);
  } else {
    CSR_WRITE_1 (sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
  }
  CSR_WRITE_2 (sc, B0_CTST, Y2_ASF_DISABLE);

  //
  // Since we disabled ASF, S/W reset is required for Power Management.
  //
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_SET);
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_CLR);

  clear_pci_errors (sc);
  switch (sc->msk_bustype) {
    case MSK_PEX_BUS:
      // Clear all PEX errors
      CSR_PCI_WRITE_4 (sc, PEX_UNC_ERR_STAT, 0xffffffff);
      val = CSR_PCI_READ_4 (sc, PEX_UNC_ERR_STAT);
      if ((val & PEX_RX_OV) != 0) {
        sc->msk_intrmask &= ~Y2_IS_HW_ERR;
        sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
      }
      break;
    case MSK_PCI_BUS:
    case MSK_PCIX_BUS:
      // Set Cache Line Size to 2 (8 bytes) if configured to 0.
      // The 8-bit read only fills the low byte of val, so clear it first.
      val = 0;
      Status = PciIo->Pci.Read (
                       PciIo,
                       EfiPciIoWidthUint8,
                       PCI_CACHELINE_SIZE_OFFSET,
                       1,
                       &val
                       );
      if (EFI_ERROR (Status)) {
        DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Reading PCI cache line size failed: %r", Status));
      }
      if (val == 0) {
        val = 2;
        Status = PciIo->Pci.Write (
                         PciIo,
                         EfiPciIoWidthUint8,
                         PCI_CACHELINE_SIZE_OFFSET,
                         1,
                         &val
                         );
        if (EFI_ERROR (Status)) {
          DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Writing PCI cache line size failed: %r", Status));
        }
      }
      if (sc->msk_bustype == MSK_PCIX_BUS) {
        Status = PciIo->Pci.Read (
                         PciIo,
                         EfiPciIoWidthUint32,
                         PCI_OUR_REG_1,
                         1,
                         &val
                         );
        if (EFI_ERROR (Status)) {
          DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Reading Our Reg 1 failed: %r", Status));
        }
        val |= PCI_CLS_OPT;
        Status = PciIo->Pci.Write (
                         PciIo,
                         EfiPciIoWidthUint32,
                         PCI_OUR_REG_1,
                         1,
                         &val
                         );
        if (EFI_ERROR (Status)) {
          DEBUG ((EFI_D_ERROR, "Marvell Yukon: Warning - Writing Our Reg 1 failed: %r", Status));
        }
      }
      break;
  }

  // Set PHY power state
  msk_phy_power (sc, MSK_PHY_POWERUP);

  // Reset GPHY/GMAC Control
  for (i = 0; i < sc->msk_num_port; i++) {
    // GPHY Control reset
    CSR_WRITE_4 (sc, MR_ADDR (i, GPHY_CTRL), GPC_RST_SET);
    CSR_WRITE_4 (sc, MR_ADDR (i, GPHY_CTRL), GPC_RST_CLR);
    if (sc->msk_hw_id == CHIP_ID_YUKON_UL_2) {
      // Magic value observed under Linux.
      CSR_WRITE_4 (sc, MR_ADDR (i, GPHY_CTRL), 0x00105226);
    }
    // GMAC Control reset
    CSR_WRITE_4 (sc, MR_ADDR (i, GMAC_CTRL), GMC_RST_SET);
    CSR_WRITE_4 (sc, MR_ADDR (i, GMAC_CTRL), GMC_RST_CLR);
    CSR_WRITE_4 (sc, MR_ADDR (i, GMAC_CTRL), GMC_F_LOOPB_OFF);
    if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
      CSR_WRITE_4 (sc, MR_ADDR (i, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON);
    }
  }
  if ((sc->msk_hw_id == CHIP_ID_YUKON_OPT) && (sc->msk_hw_rev == 0)) {
    // Disable PCIe PHY powerdown (reg 0x80, bit7)
    CSR_WRITE_4 (sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
  }
  CSR_WRITE_1 (sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

  // LED On
  CSR_WRITE_2 (sc, B0_CTST, Y2_LED_STAT_ON);

  // Enable plug in go
  CSR_WRITE_2 (sc, B0_CTST, Y_ULTRA_2_PLUG_IN_GO_EN);

  // Clear TWSI IRQ
  CSR_WRITE_4 (sc, B2_I2C_IRQ, I2C_CLR_IRQ);

  // Turn off hardware timer
  CSR_WRITE_1 (sc, B2_TI_CTRL, TIM_STOP);
  CSR_WRITE_1 (sc, B2_TI_CTRL, TIM_CLR_IRQ);

  // Turn off descriptor polling
  CSR_WRITE_1 (sc, B28_DPT_CTRL, DPT_STOP);

  // Turn off time stamps
  CSR_WRITE_1 (sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
  CSR_WRITE_1 (sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

  // Configure timeout values
  for (i = 0; i < sc->msk_num_port; i++) {
    CSR_WRITE_2 (sc, SELECT_RAM_BUFFER (i, B3_RI_CTRL), RI_RST_SET);
    CSR_WRITE_2 (sc, SELECT_RAM_BUFFER (i, B3_RI_CTRL), RI_RST_CLR);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_R1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_XA1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_XS1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_R1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_XA1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_XS1), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_R2), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_XA2), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_WTO_XS2), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_R2), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_XA2), MSK_RI_TO_53);
    CSR_WRITE_1 (sc, SELECT_RAM_BUFFER (i, B3_RI_RTO_XS2), MSK_RI_TO_53);
  }

  // Disable all interrupts
  CSR_WRITE_4 (sc, B0_HWE_IMSK, 0);
  CSR_READ_4 (sc, B0_HWE_IMSK);
  CSR_WRITE_4 (sc, B0_IMSK, 0);
  CSR_READ_4 (sc, B0_IMSK);

  // Clear status list
  gBS->SetMem (sc->msk_stat_ring, sizeof (struct msk_stat_desc) * MSK_STAT_RING_CNT, 0);
  sc->msk_stat_cons = 0;
  CSR_WRITE_4 (sc, STAT_CTRL, SC_STAT_RST_SET);
  CSR_WRITE_4 (sc, STAT_CTRL, SC_STAT_RST_CLR);

  // Set the status list base address
  PhysAddr = sc->msk_stat_ring_paddr;
  CSR_WRITE_4 (sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO (PhysAddr));
  CSR_WRITE_4 (sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI (PhysAddr));

  // Set the status list last index
  CSR_WRITE_2 (sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
  if ((sc->msk_hw_id == CHIP_ID_YUKON_EC) && (sc->msk_hw_rev == CHIP_REV_YU_EC_A1)) {
    // WA for dev. #4.3
    CSR_WRITE_2 (sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
    // WA for dev. #4.18
    CSR_WRITE_1 (sc, STAT_FIFO_WM, 0x21);
    CSR_WRITE_1 (sc, STAT_FIFO_ISR_WM, 0x07);
  } else {
    CSR_WRITE_2 (sc, STAT_TX_IDX_TH, 0x0a);
    CSR_WRITE_1 (sc, STAT_FIFO_WM, 0x10);
    if ((sc->msk_hw_id == CHIP_ID_YUKON_XL) && (sc->msk_hw_rev == CHIP_REV_YU_XL_A0)) {
      CSR_WRITE_1 (sc, STAT_FIFO_ISR_WM, 0x04);
    } else {
      CSR_WRITE_1 (sc, STAT_FIFO_ISR_WM, 0x10);
    }
    CSR_WRITE_4 (sc, STAT_ISR_TIMER_INI, 0x0190);
  }
  //
  // Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
  //
  CSR_WRITE_4 (sc, STAT_TX_TIMER_INI, MSK_USECS (sc, 1000));

  // Enable status unit
  CSR_WRITE_4 (sc, STAT_CTRL, SC_STAT_OP_ON);

  CSR_WRITE_1 (sc, STAT_TX_TIMER_CTRL, TIM_START);
  CSR_WRITE_1 (sc, STAT_LEV_TIMER_CTRL, TIM_START);
  CSR_WRITE_1 (sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

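/**
  Attach one network interface (port): select its Tx/Rx queue
  registers, allocate the descriptor rings, read the station address
  from the chip, probe the PHY and initialise the packet queues.

  @param[in]  sc_if  Interface control block to initialise.
  @param[in]  Port   MSK_PORT_A or MSK_PORT_B.

  @retval EFI_SUCCESS  The interface is active.
  @return  Errors from DMA allocation or PHY probing are passed through.
**/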
EFI_STATUS
mskc_attach_if (
  struct msk_if_softc *sc_if,
  UINTN Port
  )
{
  INTN i;
  EFI_STATUS Status;

  sc_if->msk_md.port = Port;
  sc_if->msk_flags = sc_if->msk_softc->msk_pflags;

  // Setup Tx/Rx queue register offsets
  if (Port == MSK_PORT_A) {
    sc_if->msk_txq = Q_XA1;
    sc_if->msk_txsq = Q_XS1;
    sc_if->msk_rxq = Q_R1;
  } else {
    sc_if->msk_txq = Q_XA2;
    sc_if->msk_txsq = Q_XS2;
    sc_if->msk_rxq = Q_R2;
  }

  Status = msk_txrx_dma_alloc (sc_if);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  /*
   * Get station address for this interface. Note that
   * dual port cards actually come with three station
   * addresses: one for each port, plus an extra. The
   * extra one is used by the SysKonnect driver software
   * as a 'virtual' station address for when both ports
   * are operating in failover mode. Currently we don't
   * use this extra address.
   */
  for (i = 0; i < NET_ETHER_ADDR_LEN; i++) {
    sc_if->MacAddress.Addr[i] = CSR_READ_1 (sc_if->msk_softc, B2_MAC_1 + (Port * 8) + i);
  }

  DEBUG ((EFI_D_NET, "Marvell Yukon: Mac Address %02x:%02x:%02x:%02x:%02x:%02x\n",
          sc_if->MacAddress.Addr[0], sc_if->MacAddress.Addr[1], sc_if->MacAddress.Addr[2],
          sc_if->MacAddress.Addr[3], sc_if->MacAddress.Addr[4], sc_if->MacAddress.Addr[5]));

  Status = e1000_probe_and_attach (&sc_if->mii_d, &sc_if->msk_md, sc_if, &sc_if->phy_softc);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  InitializeListHead (&sc_if->TransmitQueueHead);
  InitializeListHead (&sc_if->TransmitFreeQueueHead);
  InitializeListHead (&sc_if->ReceiveQueueHead);
  sc_if->active = TRUE;

  return (Status);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
EFI_STATUS
mskc_attach (
  IN EFI_PCI_IO_PROTOCOL *PciIo,
  OUT struct msk_softc **ScData
  )
{
  struct msk_mii_data *mmd;
  UINT64 Supports;
  UINT8 *PciBarResources;
  EFI_STATUS Status;
  struct msk_if_softc *ScIf;
  struct msk_softc *sc;

  Status = gBS->AllocatePool (EfiBootServicesData,
                  sizeof (struct msk_softc),
                  (VOID**) &sc);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Save original PCI attributes
  //
  gBS->SetMem (sc, sizeof (struct msk_softc), 0);
  sc->PciIo = PciIo;
  Status = PciIo->Attributes (
                    PciIo,
                    EfiPciIoAttributeOperationGet,
                    0,
                    &sc->OriginalPciAttributes
                    );
  if (EFI_ERROR (Status)) {
    gBS->FreePool (sc);
    return Status;
  }

  Status = PciIo->Attributes (
                    PciIo,
                    EfiPciIoAttributeOperationSupported,
                    0,
                    &Supports
                    );
  if (!EFI_ERROR (Status)) {
    Supports &= EFI_PCI_DEVICE_ENABLE;
    Status = PciIo->Attributes (
                      PciIo,
                      EfiPciIoAttributeOperationEnable,
                      Supports | EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE,
                      NULL
                      );
  }
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: Failed to enable NIC controller\n"));
    goto RESTORE_PCI_ATTRIBS;
  }

  Status = PciIo->GetBarAttributes (PciIo, 0, &Supports, (VOID**)&PciBarResources);
  if (!EFI_ERROR (Status) && (((EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *)PciBarResources)->Desc == ACPI_ADDRESS_SPACE_DESCRIPTOR)) {
    if (((EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *)PciBarResources)->ResType == ACPI_ADDRESS_SPACE_TYPE_MEM) {
      if (!(((EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *)PciBarResources)->SpecificFlag & ACPI_SPECFLAG_PREFETCHABLE)) {
        sc->RegBase = ((EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *)PciBarResources)->AddrRangeMin;
        // Should assert that Bar is 32 bits wide
        DEBUG ((DEBUG_NET, "Marvell Yukon: GlobalRegistersBase = 0x%x\n", sc->RegBase));
      } else {
        Status = EFI_NOT_FOUND;
      }
    } else {
      Status = EFI_NOT_FOUND;
    }
  }
  if (EFI_ERROR (Status)) {
    goto RESTORE_PCI_ATTRIBS;
  }

  // Clear Software Reset
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_CLR);

  // Get Hardware ID & Revision
  sc->msk_hw_id = CSR_READ_1 (sc, B2_CHIP_ID);
  sc->msk_hw_rev = (CSR_READ_1 (sc, B2_MAC_CFG) >> 4) & 0x0f;

  // Bail out if chip is not recognized
  if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
      sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
      sc->msk_hw_id == CHIP_ID_YUKON_SUPR ||
      sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
    DEBUG ((DEBUG_NET, "Marvell Yukon: unknown device: id=0x%02x, rev=0x%02x\n", sc->msk_hw_id, sc->msk_hw_rev));
    Status = EFI_DEVICE_ERROR;
    goto RESTORE_PCI_ATTRIBS;
  }
  DEBUG ((EFI_D_NET, "Marvell Yukon: Marvell Technology Group Ltd. %a Id:0x%02x Rev:0x%02x\n",
          model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, sc->msk_hw_rev));

  sc->msk_process_limit = MSK_PROC_DEFAULT;
  sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;

  // Check if MAC address is valid
  if ((CSR_READ_4 (sc, B2_MAC_1) == 0) && (CSR_READ_4 (sc, B2_MAC_1 + 4) == 0)) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: MAC address is invalid (00:00:00:00:00:00)\n"));
  }

  // Soft reset
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_SET);
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_CLR);
  sc->msk_pmd = CSR_READ_1 (sc, B2_PMD_TYP);

  // Check number of MACs
  sc->msk_num_port = 1;
  if ((CSR_READ_1 (sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
    if (!(CSR_READ_1 (sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) {
      sc->msk_num_port++;
    }
  }

  /* Check bus type. */
  sc->msk_bustype = MSK_PEX_BUS; /* Only support PCI Express */
  sc->msk_expcap = 1;

  switch (sc->msk_hw_id) {
    case CHIP_ID_YUKON_EC:
      sc->msk_clock = 125; /* 125 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO;
      break;
    case CHIP_ID_YUKON_EC_U:
      sc->msk_clock = 125; /* 125 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
      break;
    case CHIP_ID_YUKON_EX:
      sc->msk_clock = 125; /* 125 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | MSK_FLAG_AUTOTX_CSUM;
      /*
       * Yukon Extreme seems to have silicon bug for
       * automatic Tx checksum calculation capability.
       */
      if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
        sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
      }
      /*
       * Yukon Extreme A0 could not use store-and-forward
       * for jumbo frames, so disable Tx checksum
       * offloading for jumbo frames.
       */
      if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) {
        sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
      }
      break;
    case CHIP_ID_YUKON_FE:
      sc->msk_clock = 100; /* 100 MHz */
      sc->msk_pflags |= MSK_FLAG_FASTETHER;
      break;
    case CHIP_ID_YUKON_FE_P:
      sc->msk_clock = 50; /* 50 MHz */
      sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | MSK_FLAG_AUTOTX_CSUM;
      if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
        /*
         * XXX
         * FE+ A0 has a status LE writeback bug, so msk(4)
         * does not rely on the status word of received
         * frames in msk_rxeof(), which in turn disables
         * all hardware assistance bits reported by the
         * status word as well as the validity of the
         * received frame. Just pass received frames to
         * the upper stack with minimal test and let the
         * upper stack handle them.
         */
        sc->msk_pflags |= MSK_FLAG_NOHWVLAN | MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
      }
      break;
    case CHIP_ID_YUKON_XL:
      sc->msk_clock = 156; /* 156 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO;
      break;
    case CHIP_ID_YUKON_UL_2:
      sc->msk_clock = 125; /* 125 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO;
      break;
    case CHIP_ID_YUKON_OPT:
      sc->msk_clock = 125; /* 125 MHz */
      sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
      break;
    default:
      sc->msk_clock = 156; /* 156 MHz */
      break;
  }

  Status = msk_status_dma_alloc (sc);
  if (EFI_ERROR (Status)) {
    goto fail;
  }

  // Set base interrupt mask
  sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
  sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

  // Reset the adapter
  mskc_reset (sc);

  mskc_setup_rambuffer (sc);

  Status = gBS->AllocatePool (EfiBootServicesData,
                  sizeof (struct msk_if_softc),
                  (VOID**) &ScIf);
  if (EFI_ERROR (Status)) {
    goto fail;
  }
  gBS->SetMem (ScIf, sizeof (struct msk_if_softc), 0);
  ScIf->msk_softc = sc;
  sc->msk_if[MSK_PORT_A] = ScIf;
  Status = mskc_attach_if (sc->msk_if[MSK_PORT_A], MSK_PORT_A);
  if (EFI_ERROR (Status)) {
    goto fail;
  }

  mmd = &ScIf->msk_md;
  mmd->port = MSK_PORT_A;
  mmd->pmd = sc->msk_pmd;
  if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') {
    mmd->mii_flags |= MIIF_HAVEFIBER;
  }

  if (sc->msk_num_port > 1) {
    Status = gBS->AllocatePool (EfiBootServicesData,
                    sizeof (struct msk_if_softc),
                    (VOID**) &ScIf);
    if (EFI_ERROR (Status)) {
      goto fail;
    }
    gBS->SetMem (ScIf, sizeof (struct msk_if_softc), 0);
    ScIf->msk_softc = sc;
    sc->msk_if[MSK_PORT_B] = ScIf;
    Status = mskc_attach_if (sc->msk_if[MSK_PORT_B], MSK_PORT_B);
    if (EFI_ERROR (Status)) {
      goto fail;
    }

    mmd = &ScIf->msk_md;
    mmd->port = MSK_PORT_B;
    mmd->pmd = sc->msk_pmd;
    if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') {
      mmd->mii_flags |= MIIF_HAVEFIBER;
    }
  }

  // Return new msk_softc structure
  *ScData = sc;

  // Create timer for tick
  Status = gBS->CreateEvent (
                  EVT_NOTIFY_SIGNAL | EVT_TIMER,
                  TPL_CALLBACK,
                  mskc_tick,
                  (VOID *)sc,
                  &sc->Timer
                  );
  if (EFI_ERROR (Status)) {
    goto fail;
  }

  Status = gBS->SetTimer (sc->Timer, TimerPeriodic, TICKS_PER_SECOND);
  if (EFI_ERROR (Status)) {
    goto fail;
  }

fail:
  if (EFI_ERROR (Status)) {
    mskc_detach (sc);
  }

  return (Status);

RESTORE_PCI_ATTRIBS:
  //
  // Restore original PCI attributes
  //
  PciIo->Attributes (
           PciIo,
           EfiPciIoAttributeOperationSet,
           sc->OriginalPciAttributes,
           NULL
           );
  gBS->FreePool (sc);
  return Status;
}

/*
 * Shut down the hardware and free resources. This is called both on
 * the error path in attach and in the normal detach case, so it must
 * be careful to free only resources that have actually been
 * allocated.
 */
VOID
mskc_detach_if (
  struct msk_if_softc *sc_if
  )
{
  if (sc_if->active) {
    mskc_stop_if (sc_if);
    msk_txrx_dma_free (sc_if);
    e1000phy_detach (sc_if->phy_softc);
    sc_if->phy_softc = NULL;
    sc_if->active = FALSE;
  }
}

VOID
mskc_detach (
  struct msk_softc *sc
  )
{
  EFI_TPL OldTpl;
  EFI_PCI_IO_PROTOCOL *PciIo;

  if (sc == NULL) {
    return;
  }

  OldTpl = gBS->RaiseTPL (TPL_NOTIFY);

  PciIo = sc->PciIo;

  if (sc->msk_if[MSK_PORT_A] != NULL) {
    mskc_detach_if (sc->msk_if[MSK_PORT_A]);
    gBS->FreePool (sc->msk_if[MSK_PORT_A]);
    sc->msk_if[MSK_PORT_A] = NULL;
  }
  if (sc->msk_if[MSK_PORT_B] != NULL) {
    mskc_detach_if (sc->msk_if[MSK_PORT_B]);
    gBS->FreePool (sc->msk_if[MSK_PORT_B]);
    sc->msk_if[MSK_PORT_B] = NULL;
  }

  /* Disable all interrupts. */
  CSR_WRITE_4 (sc, B0_IMSK, 0);
  CSR_READ_4 (sc, B0_IMSK);
  CSR_WRITE_4 (sc, B0_HWE_IMSK, 0);
  CSR_READ_4 (sc, B0_HWE_IMSK);

  // LED Off.
  CSR_WRITE_2 (sc, B0_CTST, Y2_LED_STAT_OFF);

  // Put hardware reset.
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_SET);

  msk_status_dma_free (sc);

  if (sc->Timer != NULL) {
    gBS->SetTimer (sc->Timer, TimerCancel, 0);
    gBS->CloseEvent (sc->Timer);

    sc->Timer = NULL;
  }
  //
  // Restore original PCI attributes
  //
  PciIo->Attributes (
           PciIo,
           EfiPciIoAttributeOperationSet,
           sc->OriginalPciAttributes,
           NULL
           );

  gBS->RestoreTPL (OldTpl);
}

/* Create status DMA region. */
static
EFI_STATUS
msk_status_dma_alloc (
  struct msk_softc *sc
  )
{
  EFI_STATUS Status;
  UINTN Length;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc->PciIo;

  Status = PciIo->AllocateBuffer (PciIo, AllocateAnyPages, EfiBootServicesData,
                    EFI_SIZE_TO_PAGES (MSK_STAT_RING_SZ), (VOID**)&sc->msk_stat_ring, 0);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to allocate DMA'able memory for status ring\n"));
    return Status;
  }
  ASSERT (sc->msk_stat_ring != NULL);

  Length = MSK_STAT_RING_SZ;
  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterCommonBuffer, sc->msk_stat_ring,
                    &Length, &sc->msk_stat_ring_paddr, &sc->msk_stat_map);
  ASSERT (Length == MSK_STAT_RING_SZ);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to map DMA'able memory for status ring\n"));
  }

  return Status;
}

static
VOID
msk_status_dma_free (
  struct msk_softc *sc
  )
{
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc->PciIo;

  if (sc->msk_stat_map) {
    PciIo->Unmap (PciIo, sc->msk_stat_map);
    if (sc->msk_stat_ring) {
      PciIo->FreeBuffer (PciIo, EFI_SIZE_TO_PAGES (MSK_STAT_RING_SZ), sc->msk_stat_ring);
      sc->msk_stat_ring = NULL;
    }
    sc->msk_stat_map = NULL;
  }
}

static
EFI_STATUS
msk_txrx_dma_alloc (
  struct msk_if_softc *sc_if
  )
{
  struct msk_txdesc *txd;
  struct msk_rxdesc *rxd;
  INTN i;
  UINTN Length;
  EFI_STATUS Status;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc_if->msk_softc->PciIo;

  Status = PciIo->AllocateBuffer (PciIo, AllocateAnyPages, EfiBootServicesData,
                    EFI_SIZE_TO_PAGES (MSK_TX_RING_SZ), (VOID**)&sc_if->msk_rdata.msk_tx_ring, 0);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to allocate DMA'able memory for Tx ring\n"));
    goto fail;
  }
  ASSERT (sc_if->msk_rdata.msk_tx_ring != NULL);

  Length = MSK_TX_RING_SZ;
  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterCommonBuffer, sc_if->msk_rdata.msk_tx_ring,
                    &Length, &sc_if->msk_rdata.msk_tx_ring_paddr, &sc_if->msk_cdata.msk_tx_ring_map);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to map DMA'able memory for Tx ring\n"));
    goto fail;
  }
  ASSERT (Length == MSK_TX_RING_SZ);

  Status = PciIo->AllocateBuffer (PciIo, AllocateAnyPages, EfiBootServicesData,
                    EFI_SIZE_TO_PAGES (MSK_RX_RING_SZ), (VOID**)&sc_if->msk_rdata.msk_rx_ring, 0);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to allocate DMA'able memory for Rx ring\n"));
    goto fail;
  }
  ASSERT (sc_if->msk_rdata.msk_rx_ring != NULL);

  Length = MSK_RX_RING_SZ;
  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterCommonBuffer, sc_if->msk_rdata.msk_rx_ring,
                    &Length, &sc_if->msk_rdata.msk_rx_ring_paddr, &sc_if->msk_cdata.msk_rx_ring_map);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to map DMA'able memory for Rx ring\n"));
    goto fail;
  }
  ASSERT (Length == MSK_RX_RING_SZ);

  // Create DMA maps for Tx buffers.
  for (i = 0; i < MSK_TX_RING_CNT; i++) {
    txd = &sc_if->msk_cdata.msk_txdesc[i];
    gBS->SetMem (&(txd->tx_m), sizeof (MSK_DMA_BUF), 0);
  }
  // Create DMA maps for Rx buffers.
  for (i = 0; i < MSK_RX_RING_CNT; i++) {
    rxd = &sc_if->msk_cdata.msk_rxdesc[i];
    gBS->SetMem (&(rxd->rx_m), sizeof (MSK_DMA_BUF), 0);
  }

fail:
  return (Status);
}

static
VOID
msk_txrx_dma_free (
  struct msk_if_softc *sc_if
  )
{
  struct msk_txdesc *txd;
  struct msk_rxdesc *rxd;
  INTN i;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc_if->msk_softc->PciIo;

  // Tx ring
  if (sc_if->msk_cdata.msk_tx_ring_map) {
    PciIo->Unmap (PciIo, sc_if->msk_cdata.msk_tx_ring_map);
    if (sc_if->msk_rdata.msk_tx_ring) {
      PciIo->FreeBuffer (PciIo, EFI_SIZE_TO_PAGES (MSK_TX_RING_SZ), sc_if->msk_rdata.msk_tx_ring);
      sc_if->msk_rdata.msk_tx_ring = NULL;
    }
    sc_if->msk_cdata.msk_tx_ring_map = NULL;
  }

  // Rx ring
  if (sc_if->msk_cdata.msk_rx_ring_map) {
    PciIo->Unmap (PciIo, sc_if->msk_cdata.msk_rx_ring_map);
    if (sc_if->msk_rdata.msk_rx_ring) {
      PciIo->FreeBuffer (PciIo, EFI_SIZE_TO_PAGES (MSK_RX_RING_SZ), sc_if->msk_rdata.msk_rx_ring);
      sc_if->msk_rdata.msk_rx_ring = NULL;
    }
    sc_if->msk_cdata.msk_rx_ring_map = NULL;
  }

  // Tx buffers
  for (i = 0; i < MSK_TX_RING_CNT; i++) {
    txd = &sc_if->msk_cdata.msk_txdesc[i];
    if (txd->tx_m.DmaMapping) {
      PciIo->Unmap (PciIo, txd->tx_m.DmaMapping);
      gBS->SetMem (&(txd->tx_m), sizeof (MSK_DMA_BUF), 0);
      // We don't own the transmit buffers so don't free them
    }
  }
  // Rx buffers
  for (i = 0; i < MSK_RX_RING_CNT; i++) {
    rxd = &sc_if->msk_cdata.msk_rxdesc[i];
    if (rxd->rx_m.DmaMapping) {
      PciIo->Unmap (PciIo, rxd->rx_m.DmaMapping);
      // Free Rx buffers as we own these
      if (rxd->rx_m.Buf != NULL) {
        gBS->FreePool (rxd->rx_m.Buf);
        rxd->rx_m.Buf = NULL;
      }
      gBS->SetMem (&(rxd->rx_m), sizeof (MSK_DMA_BUF), 0);
    }
  }
}

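/**
  Place one outgoing frame on the Tx ring: map the caller's buffer
  for bus-master reads, fill in the data descriptor (plus an OP_ADDR64
  descriptor for 64-bit DMA builds), mark the last descriptor EOP and
  finally pass ownership of the first one to the hardware.

  @param[in]  sc_if   Interface control block.
  @param[in]  m_head  Buffer to transmit.

  @retval EFI_SUCCESS  The frame was queued on the ring.
  @return  Errors from PciIo->Map are passed through.
**/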
static
EFI_STATUS
msk_encap (
  struct msk_if_softc *sc_if,
  MSK_SYSTEM_BUF *m_head
  )
{
  struct msk_txdesc *txd;
  struct msk_txdesc *txd_last;
  struct msk_tx_desc *tx_le;
  VOID *Mapping;
  EFI_PHYSICAL_ADDRESS BusPhysAddr;
  UINTN BusLength;
  UINT32 control;
  UINT32 prod;
  UINT32 si;
  EFI_STATUS Status;
  EFI_PCI_IO_PROTOCOL *PciIo;

  PciIo = sc_if->msk_softc->PciIo;
  prod = sc_if->msk_cdata.msk_tx_prod;
  txd = &sc_if->msk_cdata.msk_txdesc[prod];
  txd_last = txd;
  BusLength = m_head->Length;
  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterRead, m_head->Buf,
                       &BusLength, &BusPhysAddr, &txd->tx_m.DmaMapping);

  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to map DMA'able memory for Tx buffer\n"));
    return Status;
  }
  ASSERT (BusLength == m_head->Length);

  control = 0;

#ifdef MSK_64BIT_DMA
  // When the high 32 bits of the bus address change, emit an OP_ADDR64 list
  // element so the hardware picks up the new upper address.
  if (MSK_ADDR_HI (BusPhysAddr) != sc_if->msk_cdata.msk_tx_high_addr) {
    sc_if->msk_cdata.msk_tx_high_addr = MSK_ADDR_HI (BusPhysAddr);
    tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
    tx_le->msk_addr = htole32 (MSK_ADDR_HI (BusPhysAddr));
    tx_le->msk_control = htole32 (OP_ADDR64 | HW_OWNER);
    sc_if->msk_cdata.msk_tx_cnt++;
    MSK_INC (prod, MSK_TX_RING_CNT);
  }
#endif

  si = prod;
  tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
  tx_le->msk_addr = htole32 (MSK_ADDR_LO (BusPhysAddr));
  tx_le->msk_control = htole32 (BusLength | control | OP_PACKET);
  sc_if->msk_cdata.msk_tx_cnt++;
  MSK_INC (prod, MSK_TX_RING_CNT);

  // Update producer index
  sc_if->msk_cdata.msk_tx_prod = prod;

  // Set EOP on the last descriptor
  prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
  tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
  tx_le->msk_control |= htole32 (EOP);

  // Turn the first descriptor ownership to hardware
  tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
  tx_le->msk_control |= htole32 (HW_OWNER);

  txd = &sc_if->msk_cdata.msk_txdesc[prod];
  Mapping = txd_last->tx_m.DmaMapping;
  txd_last->tx_m.DmaMapping = txd->tx_m.DmaMapping;
  txd->tx_m.DmaMapping = Mapping;
  txd->tx_m.Buf = m_head->Buf;
  txd->tx_m.Length = m_head->Length;

  return EFI_SUCCESS;
}

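//
// Queue a frame for transmission. The caller's Buffer is linked into the
// transmit queue without copying; ownership stays with the caller, which can
// reclaim the buffer once mskc_getstatus () reports it via TxBuf.
//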
EFI_STATUS
mskc_transmit (
  struct msk_if_softc *sc_if,
  UINTN BufferSize,
  VOID *Buffer
  )
{
  MSK_LINKED_SYSTEM_BUF *LinkedSystemBuf;
  EFI_STATUS Status;

  Status = gBS->AllocatePool (EfiBootServicesData,
                              sizeof (MSK_LINKED_SYSTEM_BUF),
                              (VOID**) &LinkedSystemBuf);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  gBS->SetMem (LinkedSystemBuf, sizeof (MSK_LINKED_SYSTEM_BUF), 0);
  LinkedSystemBuf->Signature = TX_MBUF_SIGNATURE;
  //
  // Add the passed Buffer to the transmit queue. Don't copy.
  //
  LinkedSystemBuf->SystemBuf.Buf = Buffer;
  LinkedSystemBuf->SystemBuf.Length = BufferSize;
  InsertTailList (&sc_if->TransmitQueueHead, &LinkedSystemBuf->Link);
  msk_start (sc_if);
  return EFI_SUCCESS;
}

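//
// Poll the controller: when InterruptStatus is non-NULL the interrupt
// sources are serviced, and when TxBuf is non-NULL the oldest completed
// transmit buffer (if any) is returned to the caller and unlinked from the
// free queue.
//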
VOID
mskc_getstatus (
  struct msk_if_softc *sc_if,
  OUT UINT32 *InterruptStatus, OPTIONAL
  OUT VOID **TxBuf OPTIONAL
  )
{
  MSK_LINKED_SYSTEM_BUF *m_head;

  // Interrupt status is not read from the device when InterruptStatus is NULL
  if (InterruptStatus != NULL) {
    // Check the interrupt lines
    msk_intr (sc_if->msk_softc);
  }

  // The transmit buffer status is not read when TxBuf is NULL
  if (TxBuf != NULL) {
    *((UINT8 **) TxBuf) = (UINT8 *) 0;
    if (!IsListEmpty (&sc_if->TransmitFreeQueueHead)) {
      m_head = CR (GetFirstNode (&sc_if->TransmitFreeQueueHead), MSK_LINKED_SYSTEM_BUF, Link, TX_MBUF_SIGNATURE);
      if (m_head != NULL) {
        *TxBuf = m_head->SystemBuf.Buf;
        RemoveEntryList (&m_head->Link);
        gBS->FreePool (m_head);
      }
    }
  }
}

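//
// Drain the driver's transmit queue into the Tx descriptor ring and, if any
// frames were encapsulated, bump the prefetch unit's put index so the
// hardware starts fetching them.
//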
static
VOID
msk_start (
  struct msk_if_softc *sc_if
  )
{
  EFI_STATUS Status;
  MSK_LINKED_SYSTEM_BUF *m_head;
  INTN enq;

  for (enq = 0; !IsListEmpty (&sc_if->TransmitQueueHead) &&
       sc_if->msk_cdata.msk_tx_cnt < (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {

    m_head = CR (GetFirstNode (&sc_if->TransmitQueueHead), MSK_LINKED_SYSTEM_BUF, Link, TX_MBUF_SIGNATURE);
    if (m_head == NULL) {
      break;
    }
    //
    // Pack the data into the transmit ring. If we
    // don't have room, set the OACTIVE flag and wait
    // for the NIC to drain the ring.
    //
    Status = msk_encap (sc_if, &m_head->SystemBuf);
    if (EFI_ERROR (Status)) {
      break;
    }

    RemoveEntryList (&m_head->Link);
    InsertTailList (&sc_if->TransmitFreeQueueHead, &m_head->Link);
    enq++;
  }

  if (enq > 0) {
    // Transmit
    CSR_WRITE_2 (sc_if->msk_softc, Y2_PREF_Q_ADDR (sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
                 sc_if->msk_cdata.msk_tx_prod);
  }
}

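//
// Stop both ports, cancel the periodic timer and put the controller into
// hardware reset. Runs at TPL_NOTIFY so the tick callback cannot race the
// teardown.
//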
VOID
mskc_shutdown (
  struct msk_softc *sc
  )
{
  INTN i;
  EFI_TPL OldTpl;

  OldTpl = gBS->RaiseTPL (TPL_NOTIFY);

  for (i = 0; i < sc->msk_num_port; i++) {
    if (sc->msk_if[i] != NULL) {
      mskc_stop_if (sc->msk_if[i]);
    }
  }
  gBS->SetTimer (sc->Timer, TimerCancel, 0);

  // Put the hardware into reset.
  CSR_WRITE_2 (sc, B0_CTST, CS_RST_SET);

  gBS->RestoreTPL (OldTpl);
}

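//
// Fetch the oldest received frame. On success the frame is copied into the
// caller's Buffer and the driver-owned receive buffer is freed; if Buffer is
// too small, *BufferSize is updated with the required length and
// EFI_BUFFER_TOO_SMALL is returned with the frame left queued.
//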
EFI_STATUS
mskc_receive (
  struct msk_if_softc *sc_if,
  IN OUT UINTN *BufferSize,
  OUT VOID *Buffer
  )
{
  MSK_LINKED_SYSTEM_BUF *mBuf;

  msk_intr (sc_if->msk_softc); // check the interrupt lines

  if (IsListEmpty (&sc_if->ReceiveQueueHead)) {
    *BufferSize = 0;
    return EFI_NOT_READY;
  }

  mBuf = CR (GetFirstNode (&sc_if->ReceiveQueueHead), MSK_LINKED_SYSTEM_BUF, Link, RX_MBUF_SIGNATURE);
  if (mBuf->SystemBuf.Length > *BufferSize) {
    // Report the provided size before overwriting it with the required one
    DEBUG ((EFI_D_NET, "Marvell Yukon: Receive buffer is too small: Provided = %d, Received = %d\n",
           *BufferSize, mBuf->SystemBuf.Length));
    *BufferSize = mBuf->SystemBuf.Length;
    return EFI_BUFFER_TOO_SMALL;
  }
  *BufferSize = mBuf->SystemBuf.Length;
  RemoveEntryList (&mBuf->Link);
  gBS->CopyMem (Buffer, mBuf->SystemBuf.Buf, *BufferSize);
  gBS->FreePool (mBuf->SystemBuf.Buf);
  gBS->FreePool (mBuf);
  return EFI_SUCCESS;
}

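//
// Handle a receive completion: validate the status word, swap a freshly
// allocated buffer into the ring slot and queue the filled buffer on
// ReceiveQueueHead for mskc_receive () to hand to the caller.
//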
static VOID
msk_rxeof (
  struct msk_if_softc *sc_if,
  UINT32 status,
  UINT32 control,
  INTN len
  )
{
  EFI_STATUS Status;
  MSK_LINKED_SYSTEM_BUF *m_link;
  struct msk_rxdesc *rxd;
  INTN cons;
  INTN rxlen;
  MSK_DMA_BUF m;
  EFI_PCI_IO_PROTOCOL *PciIo;

  DEBUG ((EFI_D_NET, "Marvell Yukon: rxeof\n"));

  PciIo = sc_if->msk_softc->PciIo;
  cons = sc_if->msk_cdata.msk_rx_cons;
  do {
    rxlen = status >> 16;
    if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
      //
      // For controllers that return a bogus status code,
      // just do a minimal check and let the upper stack
      // handle this frame.
      //
      if (len > MAX_SUPPORTED_PACKET_SIZE || len < NET_ETHER_ADDR_LEN) {
        msk_discard_rxbuf (sc_if, cons);
        break;
      }
    } else if (len > sc_if->msk_framesize ||
               ((status & GMR_FS_ANY_ERR) != 0) ||
               ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
      msk_discard_rxbuf (sc_if, cons);
      break;
    }

#ifdef MSK_64BIT_DMA
    rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) % MSK_RX_RING_CNT];
#else
    rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
#endif

    m.Buf = rxd->rx_m.Buf;
    m.DmaMapping = rxd->rx_m.DmaMapping;
    m.Length = rxd->rx_m.Length;

    Status = msk_newbuf (sc_if, cons);
    if (EFI_ERROR (Status)) {
      // This is a dropped packet, but we aren't counting drops
      // Reuse old buffer
      msk_discard_rxbuf (sc_if, cons);
      break;
    }

    Status = PciIo->Flush (PciIo);
    if (EFI_ERROR (Status)) {
      DEBUG ((EFI_D_NET, "Marvell Yukon: failed to Flush DMA\n"));
    }

    // Unmap the filled buffer saved in 'm'; msk_newbuf () has already
    // installed a new buffer and mapping in the ring slot.
    Status = PciIo->Unmap (PciIo, m.DmaMapping);
    if (EFI_ERROR (Status)) {
      DEBUG ((EFI_D_NET, "Marvell Yukon: failed to Unmap DMA\n"));
    }

    Status = gBS->AllocatePool (EfiBootServicesData,
                                sizeof (MSK_LINKED_SYSTEM_BUF),
                                (VOID**) &m_link);
    if (!EFI_ERROR (Status)) {
      gBS->SetMem (m_link, sizeof (MSK_LINKED_SYSTEM_BUF), 0);
      m_link->Signature = RX_MBUF_SIGNATURE;
      m_link->SystemBuf.Buf = m.Buf;
      m_link->SystemBuf.Length = len;

      InsertTailList (&sc_if->ReceiveQueueHead, &m_link->Link);
    } else {
      DEBUG ((EFI_D_NET, "Marvell Yukon: failed to allocate receive buffer link. Dropping Frame\n"));
      gBS->FreePool (m.Buf);
    }
  } while (0);

  MSK_RX_INC (sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
  MSK_RX_INC (sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}

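//
// Reclaim transmit descriptors up to the consumer index reported by the
// status LE: unmap each completed buffer and clear its ring bookkeeping.
//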
static
VOID
msk_txeof (
  struct msk_if_softc *sc_if,
  INTN idx
  )
{
  struct msk_txdesc *txd;
  struct msk_tx_desc *cur_tx;
  UINT32 control;
  INTN cons;
  INTN prog;
  EFI_PCI_IO_PROTOCOL *PciIo;

  DEBUG ((EFI_D_NET, "Marvell Yukon: txeof\n"));

  PciIo = sc_if->msk_softc->PciIo;

  //
  // Go through our tx ring and free mbufs for those
  // frames that have been sent.
  //
  cons = sc_if->msk_cdata.msk_tx_cons;
  prog = 0;
  for (; cons != idx; MSK_INC (cons, MSK_TX_RING_CNT)) {
    if (sc_if->msk_cdata.msk_tx_cnt <= 0) {
      break;
    }
    prog++;
    cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
    control = le32toh (cur_tx->msk_control);
    sc_if->msk_cdata.msk_tx_cnt--;
    if ((control & EOP) == 0) {
      continue;
    }
    txd = &sc_if->msk_cdata.msk_txdesc[cons];
    PciIo->Unmap (PciIo, txd->tx_m.DmaMapping);
    gBS->SetMem (&(txd->tx_m), sizeof (MSK_DMA_BUF), 0);
    // We don't own the transmit buffers so don't free them
  }

  if (prog > 0) {
    sc_if->msk_cdata.msk_tx_cons = cons;
    // No need to sync LEs as we didn't update LEs.
  }
}

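//
// Periodic timer callback: drives the PHY tick for each active port and
// then processes any pending status LEs, all at TPL_NOTIFY.
//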
VOID
mskc_tick (
  IN EFI_EVENT Event,
  IN VOID *Context
  )
{
  EFI_TPL OldTpl;
  struct msk_softc *sc;

  OldTpl = gBS->RaiseTPL (TPL_NOTIFY);

  sc = (struct msk_softc *)Context;

  if (sc->msk_if[MSK_PORT_A] != NULL && sc->msk_if[MSK_PORT_A]->active) {
    e1000phy_tick (sc->msk_if[MSK_PORT_A]->phy_softc);
  }
  if (sc->msk_if[MSK_PORT_B] != NULL && sc->msk_if[MSK_PORT_B]->active) {
    e1000phy_tick (sc->msk_if[MSK_PORT_B]->phy_softc);
  }

  msk_handle_events (sc);

  gBS->RestoreTPL (OldTpl);
}

static
VOID
msk_intr_phy (
  struct msk_if_softc *sc_if
  )
{
  UINT16 status;

  // The PHY interrupt status register is clear-on-read; the first read
  // flushes previously latched bits, the second returns the settled value.
  msk_phy_readreg (sc_if, PHY_MARV_INT_STAT);
  status = msk_phy_readreg (sc_if, PHY_MARV_INT_STAT);

  // Handle FIFO Underrun/Overflow?
  if ((status & PHY_M_IS_FIFO_ERROR) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: PHY FIFO underrun/overflow.\n"));
  }
}

static
VOID
msk_intr_gmac (
  struct msk_if_softc *sc_if
  )
{
  UINT8 status;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;

  status = CSR_READ_1 (sc, MR_ADDR (sc_if->msk_md.port, GMAC_IRQ_SRC));

  // GMAC Rx FIFO overrun.
  if ((status & GM_IS_RX_FF_OR) != 0) {
    CSR_WRITE_4 (sc, MR_ADDR (sc_if->msk_md.port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
  }
  // GMAC Tx FIFO underrun.
  if ((status & GM_IS_TX_FF_UR) != 0) {
    CSR_WRITE_4 (sc, MR_ADDR (sc_if->msk_md.port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
    DEBUG ((EFI_D_NET, "Marvell Yukon: Tx FIFO underrun!\n"));
    /*
     * XXX
     * In case of Tx underrun, we may need to flush/reset
     * the Tx MAC, but that would also require resynchronization
     * with status LEs. Reinitializing status LEs would
     * affect the other port in dual MAC configurations, so it
     * should be avoided as much as possible.
     * Due to lack of documentation it's all vague guesswork and
     * it needs more investigation.
     */
  }
}

static
VOID
msk_handle_hwerr (
  struct msk_if_softc *sc_if,
  UINT32 status
  )
{
  struct msk_softc *sc;

  sc = sc_if->msk_softc;

  if ((status & Y2_IS_PAR_RD1) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: RAM buffer read parity error\n"));
    // Clear IRQ.
    CSR_WRITE_2 (sc, SELECT_RAM_BUFFER (sc_if->msk_md.port, B3_RI_CTRL), RI_CLR_RD_PERR);
  }
  if ((status & Y2_IS_PAR_WR1) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: RAM buffer write parity error\n"));
    // Clear IRQ
    CSR_WRITE_2 (sc, SELECT_RAM_BUFFER (sc_if->msk_md.port, B3_RI_CTRL), RI_CLR_WR_PERR);
  }
  if ((status & Y2_IS_PAR_MAC1) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Tx MAC parity error\n"));
    // Clear IRQ
    CSR_WRITE_4 (sc, MR_ADDR (sc_if->msk_md.port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
  }
  if ((status & Y2_IS_PAR_RX1) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Rx parity error\n"));
    // Clear IRQ
    CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
  }
  if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: TCP segmentation error\n"));
    // Clear IRQ
    CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
  }
}

static
VOID
msk_intr_hwerr (
  struct msk_softc *sc
  )
{
  UINT32 status;
  UINT32 tlphead[4];

  status = CSR_READ_4 (sc, B0_HWE_ISRC);

  // Time Stamp timer overflow.
  if ((status & Y2_IS_TIST_OV) != 0) {
    CSR_WRITE_1 (sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
  }
  if ((status & Y2_IS_PCI_NEXP) != 0) {
    /*
     * A PCI Express error occurred which is not described in the PEX
     * spec.
     * This error is also mapped either to the Master Abort
     * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
     * can only be cleared there.
     */
    DEBUG ((EFI_D_NET, "Marvell Yukon: PCI Express protocol violation error\n"));
  }

  if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {

    if ((status & Y2_IS_MST_ERR) != 0) {
      DEBUG ((EFI_D_NET, "Marvell Yukon: unexpected IRQ Master error\n"));
    } else {
      DEBUG ((EFI_D_NET, "Marvell Yukon: unexpected IRQ Status error\n"));
    }
    // Reset all bits in the PCI status register
    clear_pci_errors (sc);
  }

  // Check for PCI Express Uncorrectable Error.
  if ((status & Y2_IS_PCI_EXP) != 0) {
    UINT32 v32;

    /*
     * On PCI Express, bus bridges are called root complexes (RC).
     * PCI Express errors are recognized by the root complex too,
     * which requests the system to handle the problem. After
     * error occurrence it may be that no access to the adapter
     * may be performed any longer.
     */

    v32 = CSR_PCI_READ_4 (sc, PEX_UNC_ERR_STAT);
    if ((v32 & PEX_UNSUP_REQ) != 0) {
      // Ignore unsupported request error.
      DEBUG ((EFI_D_NET, "Marvell Yukon: Uncorrectable PCI Express error\n"));
    }
    if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
      INTN i;

      // Get the TLP header from the Log Registers.
      for (i = 0; i < 4; i++) {
        tlphead[i] = CSR_PCI_READ_4 (sc, PEX_HEADER_LOG + i * 4);
      }
      // Check for vendor defined broadcast message.
      if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
        sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
        CSR_WRITE_4 (sc, B0_HWE_IMSK, sc->msk_intrhwemask);
        CSR_READ_4 (sc, B0_HWE_IMSK);
      }
    }
    // Clear the interrupt
    CSR_WRITE_1 (sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
    CSR_PCI_WRITE_4 (sc, PEX_UNC_ERR_STAT, 0xffffffff);
    CSR_WRITE_1 (sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  }

  if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) {
    msk_handle_hwerr (sc->msk_if[MSK_PORT_A], status);
  }
  if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) {
    msk_handle_hwerr (sc->msk_if[MSK_PORT_B], status >> 8);
  }
}

static
__inline
VOID
msk_rxput (
  struct msk_if_softc *sc_if
  )
{
  CSR_WRITE_2 (sc_if->msk_softc, Y2_PREF_Q_ADDR (sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}

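//
// Walk the status LE ring: for each list element owned by the driver,
// dispatch OP_RXSTAT completions to msk_rxeof () and OP_TXINDEXLE transmit
// indices to msk_txeof (), batching Rx put-index updates until the
// prefetch-unit watermark is reached. Returns non-zero if more status LEs
// are still pending.
//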
static
INTN
msk_handle_events (
  struct msk_softc *sc
  )
{
  INTN rxput[2];
  struct msk_stat_desc *sd;
  UINT32 control;
  UINT32 status;
  INTN cons;
  INTN len;
  INTN port;
  INTN rxprog;
  struct msk_if_softc *sc_if;

  if (sc->msk_stat_cons == CSR_READ_2 (sc, STAT_PUT_IDX)) {
    return (0);
  }

  rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
  rxprog = 0;
  cons = sc->msk_stat_cons;
  for (;;) {
    sd = &sc->msk_stat_ring[cons];
    control = le32toh (sd->msk_control);
    if ((control & HW_OWNER) == 0) {
      break;
    }
    control &= ~HW_OWNER;
    sd->msk_control = htole32 (control);
    status = le32toh (sd->msk_status);
    len = control & STLE_LEN_MASK;
    port = (control >> 16) & 0x01;
    sc_if = sc->msk_if[port];
    if (sc_if == NULL) {
      DEBUG ((EFI_D_NET, "Marvell Yukon: invalid port opcode 0x%08x\n", control & STLE_OP_MASK));
      continue;
    }

    switch (control & STLE_OP_MASK) {
    case OP_RXSTAT:
      msk_rxeof (sc_if, status, control, len);
      rxprog++;
      //
      // Because there is no way to sync a single Rx LE,
      // put the DMA sync operation off until the end of
      // event processing.
      //
      rxput[port]++;
      // Update the prefetch unit if we've passed the water mark
      if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
        msk_rxput (sc_if);
        rxput[port] = 0;
      }
      break;
    case OP_TXINDEXLE:
      if (sc->msk_if[MSK_PORT_A] != NULL) {
        msk_txeof (sc->msk_if[MSK_PORT_A], status & STLE_TXA1_MSKL);
      }
      if (sc->msk_if[MSK_PORT_B] != NULL) {
        msk_txeof (sc->msk_if[MSK_PORT_B],
                   ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
                   ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH));
      }
      break;
    default:
      DEBUG ((EFI_D_NET, "Marvell Yukon: unhandled opcode 0x%08x\n", control & STLE_OP_MASK));
      break;
    }
    MSK_INC (cons, MSK_STAT_RING_CNT);
    if (rxprog > sc->msk_process_limit) {
      break;
    }
  }

  sc->msk_stat_cons = cons;

  if (rxput[MSK_PORT_A] > 0) {
    msk_rxput (sc->msk_if[MSK_PORT_A]);
  }
  if (rxput[MSK_PORT_B] > 0) {
    msk_rxput (sc->msk_if[MSK_PORT_B]);
  }

  return (sc->msk_stat_cons != CSR_READ_2 (sc, STAT_PUT_IDX));
}

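//
// Top-level interrupt service routine. Reading B0_Y2_SP_ISRC2 masks further
// interrupts; all pending sources are serviced and the final write of 2 to
// B0_Y2_SP_ICR re-arms the interrupt.
//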
STATIC
VOID
msk_intr (
  struct msk_softc *sc
  )
{
  struct msk_if_softc *sc_if0;
  struct msk_if_softc *sc_if1;
  UINT32 Status;
  INTN domore;

  // Reading B0_Y2_SP_ISRC2 masks further interrupts
  Status = CSR_READ_4 (sc, B0_Y2_SP_ISRC2);
  if (Status == 0 || Status == 0xffffffff ||
      (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
      (Status & sc->msk_intrmask) == 0) {
    // Leave ISR - Reenable interrupts
    CSR_WRITE_4 (sc, B0_Y2_SP_ICR, 2);
    return;
  }

  sc_if0 = sc->msk_if[MSK_PORT_A];
  sc_if1 = sc->msk_if[MSK_PORT_B];

  if ((Status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) {
    msk_intr_phy (sc_if0);
  }
  if ((Status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) {
    msk_intr_phy (sc_if1);
  }
  if ((Status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) {
    msk_intr_gmac (sc_if0);
  }
  if ((Status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) {
    msk_intr_gmac (sc_if1);
  }
  if ((Status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Rx descriptor error\n"));
    sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
    CSR_WRITE_4 (sc, B0_IMSK, sc->msk_intrmask);
    CSR_READ_4 (sc, B0_IMSK);
  }
  if ((Status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Tx descriptor error\n"));
    sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
    CSR_WRITE_4 (sc, B0_IMSK, sc->msk_intrmask);
    CSR_READ_4 (sc, B0_IMSK);
  }
  if ((Status & Y2_IS_HW_ERR) != 0) {
    msk_intr_hwerr (sc);
  }

  domore = msk_handle_events (sc);
  if ((Status & Y2_IS_STAT_BMU) != 0 && domore == 0) {
    CSR_WRITE_4 (sc, STAT_CTRL, SC_STAT_CLR_IRQ);
  }

  // Leave ISR - Reenable interrupts
  CSR_WRITE_4 (sc, B0_Y2_SP_ICR, 2);
}

static
VOID
msk_set_tx_stfwd (
  struct msk_if_softc *sc_if
  )
{
  // Disable jumbo frames for Tx
  CSR_WRITE_4 (sc_if->msk_softc, MR_ADDR (sc_if->msk_md.port, TX_GMF_CTRL_T), TX_JUMBO_DIS | TX_STFW_ENA);
}

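//
// Public entry point to (re)initialize an interface; thin wrapper around
// msk_init ().
//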
EFI_STATUS
mskc_init (
  struct msk_if_softc *sc_if
  )
{
  return msk_init (sc_if);
}

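//
// Bring the interface up from scratch: stop any pending I/O, reset and
// program the GMAC, FIFOs, RAM buffers, BMUs and prefetch units, set up the
// Tx/Rx rings and unmask the port's interrupts before kicking off PHY
// autonegotiation.
//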
static
EFI_STATUS
msk_init (
  IN struct msk_if_softc *sc_if
  )
{
  UINT8 *eaddr;
  UINT16 gmac;
  UINT32 reg;
  EFI_STATUS Status;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  // Cancel pending I/O and free all Rx/Tx buffers.
  mskc_stop_if (sc_if);

  sc_if->msk_framesize = MAX_SUPPORTED_PACKET_SIZE;

  // GMAC Control reset.
  CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), GMC_RST_SET);
  CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), GMC_RST_CLR);
  CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), GMC_F_LOOPB_OFF);
  if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
    CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON);
  }

  //
  // Initialize GMAC first such that speed/duplex/flow-control
  // parameters are renegotiated when the interface is brought up.
  //
  GMAC_WRITE_2 (sc, port, GM_GP_CTRL, 0);

  // Dummy read of the Interrupt Source Register
  CSR_READ_1 (sc, MR_ADDR (port, GMAC_IRQ_SRC));

  // Clear MIB stats
  msk_stats_clear (sc_if);

  // Disable FCS
  GMAC_WRITE_2 (sc, port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

  // Setup Transmit Control Register
  GMAC_WRITE_2 (sc, port, GM_TX_CTRL, TX_COL_THR (TX_COL_DEF));

  // Setup Transmit Flow Control Register
  GMAC_WRITE_2 (sc, port, GM_TX_FLOW_CTRL, 0xffff);

  // Setup Transmit Parameter Register
  GMAC_WRITE_2 (sc, port, GM_TX_PARAM,
                TX_JAM_LEN_VAL (TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL (TX_JAM_IPG_DEF) |
                TX_IPG_JAM_DATA (TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM (TX_BOF_LIM_DEF));

  gmac = DATA_BLIND_VAL (DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL (IPG_DATA_DEF);

  GMAC_WRITE_2 (sc, port, GM_SERIAL_MODE, gmac);

  // Set station address
  eaddr = sc_if->MacAddress.Addr;
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_1L, eaddr[0] | (eaddr[1] << 8));
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_1M, eaddr[2] | (eaddr[3] << 8));
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_1H, eaddr[4] | (eaddr[5] << 8));
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_2L, eaddr[0] | (eaddr[1] << 8));
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_2M, eaddr[2] | (eaddr[3] << 8));
  GMAC_WRITE_2 (sc, port, GM_SRC_ADDR_2H, eaddr[4] | (eaddr[5] << 8));

  // Disable interrupts for counter overflows
  GMAC_WRITE_2 (sc, port, GM_TX_IRQ_MSK, 0);
  GMAC_WRITE_2 (sc, port, GM_RX_IRQ_MSK, 0);
  GMAC_WRITE_2 (sc, port, GM_TR_IRQ_MSK, 0);

  // Configure Rx MAC FIFO
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), GMF_RST_SET);
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), GMF_RST_CLR);
  reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
  if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || sc->msk_hw_id == CHIP_ID_YUKON_EX) {
    reg |= GMF_RX_OVER_ON;
  }
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), reg);

  if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
    // Clear flush mask - HW bug
    CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_FL_MSK), 0);
  } else {
    // Flush Rx MAC FIFO on any flow control or error
    CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
  }

  //
  // Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
  // due to hardware hang on receipt of pause frames.
  //
  reg = RX_GMF_FL_THR_DEF + 1;
  // Another magic value for Yukon FE+ - from Linux
  if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
    reg = 0x178;
  }
  CSR_WRITE_2 (sc, MR_ADDR (port, RX_GMF_FL_THR), reg);

  // Configure Tx MAC FIFO
  CSR_WRITE_4 (sc, MR_ADDR (port, TX_GMF_CTRL_T), GMF_RST_SET);
  CSR_WRITE_4 (sc, MR_ADDR (port, TX_GMF_CTRL_T), GMF_RST_CLR);
  CSR_WRITE_4 (sc, MR_ADDR (port, TX_GMF_CTRL_T), GMF_OPER_ON);

  // Configure hardware VLAN tag insertion/stripping
  msk_setvlan (sc_if);

  if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
    // Set Rx Pause threshold.
    CSR_WRITE_2 (sc, MR_ADDR (port, RX_GMF_LP_THR), MSK_ECU_LLPP);
    CSR_WRITE_2 (sc, MR_ADDR (port, RX_GMF_UP_THR), MSK_ECU_ULPP);
    // Configure store-and-forward for Tx.
    msk_set_tx_stfwd (sc_if);
  }

  if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
    // Disable dynamic watermark - from Linux
    reg = CSR_READ_4 (sc, MR_ADDR (port, TX_GMF_EA));
    reg &= ~0x03;
    CSR_WRITE_4 (sc, MR_ADDR (port, TX_GMF_EA), reg);
  }

  //
  // Disable Force Sync bit and Alloc bit in Tx RAM interface
  // arbiter as we don't use Sync Tx queue.
  //
  CSR_WRITE_1 (sc, MR_ADDR (port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
  // Enable the RAM Interface Arbiter
  CSR_WRITE_1 (sc, MR_ADDR (port, TXA_CTRL), TXA_ENA_ARB);

  // Setup RAM buffer
  msk_set_rambuffer (sc_if);

  // Disable Tx sync Queue
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

  // Setup Tx Queue Bus Memory Interface
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
  CSR_WRITE_2 (sc, Q_ADDR (sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
  switch (sc->msk_hw_id) {
  case CHIP_ID_YUKON_EC_U:
    if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
      // Fix for Yukon-EC Ultra: set BMU FIFO level
      CSR_WRITE_2 (sc, Q_ADDR (sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
    }
    break;
  case CHIP_ID_YUKON_EX:
    //
    // Yukon Extreme seems to have a silicon bug in its
    // automatic Tx checksum calculation capability.
    //
    if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
      CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_F), F_TX_CHK_AUTO_OFF);
    }
    break;
  }

  // Setup Rx Queue Bus Memory Interface
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
  CSR_WRITE_2 (sc, Q_ADDR (sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
  if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
    // MAC Rx RAM Read is controlled by hardware
    CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
  }

  // Truncate too-large frames - from Linux
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_TR_THR), 0x17a);
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), RX_TRUNC_ON);

  msk_set_prefetch (sc_if, sc_if->msk_txq, sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
  msk_init_tx_ring (sc_if);

  // Disable Rx checksum offload and RSS hash
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
  msk_set_prefetch (sc_if, sc_if->msk_rxq, sc_if->msk_rdata.msk_rx_ring_paddr, MSK_RX_RING_CNT - 1);
  Status = msk_init_rx_ring (sc_if);
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_ERROR, "Marvell Yukon: Initialization failed: no memory for Rx buffers\n"));
    mskc_stop_if (sc_if);
    return Status;
  }

  if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
    // Disable flushing of non-ASF packets
    CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), GMF_RX_MACSEC_FLUSH_OFF);
  }

  // Configure interrupt handling
  if (port == MSK_PORT_A) {
    sc->msk_intrmask |= Y2_IS_PORT_A;
    sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
  } else {
    sc->msk_intrmask |= Y2_IS_PORT_B;
    sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
  }
  // Configure IRQ moderation mask.
  CSR_WRITE_4 (sc, B2_IRQM_MSK, sc->msk_intrmask);
  if (sc->msk_int_holdoff > 0) {
    // Configure initial IRQ moderation timer value.
    CSR_WRITE_4 (sc, B2_IRQM_INI, MSK_USECS (sc, sc->msk_int_holdoff));
    CSR_WRITE_4 (sc, B2_IRQM_VAL, MSK_USECS (sc, sc->msk_int_holdoff));
    // Start IRQ moderation.
    CSR_WRITE_1 (sc, B2_IRQM_CTRL, TIM_START);
  }
  CSR_WRITE_4 (sc, B0_HWE_IMSK, sc->msk_intrhwemask);
  CSR_READ_4 (sc, B0_HWE_IMSK);
  CSR_WRITE_4 (sc, B0_IMSK, sc->msk_intrmask);
  CSR_READ_4 (sc, B0_IMSK);

  sc_if->msk_flags &= ~MSK_FLAG_LINK;
  e1000phy_mediachg (sc_if->phy_softc);

  return Status;
}

STATIC
VOID
msk_set_rambuffer (
  struct msk_if_softc *sc_if
  )
{
  INTN ltpp;
  INTN utpp;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
    return;
  }

  // Setup Rx Queue
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_START), sc->msk_rxqstart[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_END), sc->msk_rxqend[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_WP), sc->msk_rxqstart[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_RP), sc->msk_rxqstart[port] / 8);

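  //
  // Flow-control thresholds: the RAM buffer registers appear to be
  // programmed in 8-byte units (hence the division by 8). The upper
  // threshold (UTPP) leaves MSK_RB_ULPP of headroom below the end of the
  // queue, and the lower threshold (LTPP) leaves MSK_RB_LLPP_B, widened
  // for small Rx queues.
  //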
  utpp = (sc->msk_rxqend[port] + 1 - sc->msk_rxqstart[port] - MSK_RB_ULPP) / 8;
  ltpp = (sc->msk_rxqend[port] + 1 - sc->msk_rxqstart[port] - MSK_RB_LLPP_B) / 8;
  if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) {
    ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
  }
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_RX_UTPP), utpp);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_rxq, RB_RX_LTPP), ltpp);
  // Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds?

  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
  CSR_READ_1 (sc, RB_ADDR (sc_if->msk_rxq, RB_CTRL));

  // Setup Tx Queue.
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_txq, RB_START), sc->msk_txqstart[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_txq, RB_END), sc->msk_txqend[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_txq, RB_WP), sc->msk_txqstart[port] / 8);
  CSR_WRITE_4 (sc, RB_ADDR (sc_if->msk_txq, RB_RP), sc->msk_txqstart[port] / 8);

  // Enable Store & Forward for Tx side
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
  CSR_READ_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL));
}

STATIC
VOID
msk_set_prefetch (
  struct msk_if_softc *sc_if,
  INTN qaddr,
  EFI_PHYSICAL_ADDRESS addr,
  UINT32 count
  )
{
  struct msk_softc *sc;

  sc = sc_if->msk_softc;

  // Reset the prefetch unit
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET);
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_CLR);
  // Set LE base address
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_ADDR_LOW_REG), MSK_ADDR_LO (addr));
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_ADDR_HI_REG), MSK_ADDR_HI (addr));

  // Set the list last index
  CSR_WRITE_2 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_LAST_IDX_REG), count);
  // Turn on the prefetch unit
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_OP_ON);
  // Dummy read to ensure the write posts
  CSR_READ_4 (sc, Y2_PREF_Q_ADDR (qaddr, PREF_UNIT_CTRL_REG));
}

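//
// Quiesce one interface: mask its interrupts, disable the Tx/Rx MACs, stop
// the BMUs and RAM buffers (polling until any DMA in flight completes),
// reset the prefetch units and MAC FIFOs, then release all queued Rx/Tx
// buffers.
//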
VOID
mskc_stop_if (
  struct msk_if_softc *sc_if
  )
{
  struct msk_txdesc *txd;
  struct msk_rxdesc *rxd;
  UINT32 val;
  INTN i;
  INTN port;
  EFI_PCI_IO_PROTOCOL *PciIo;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  PciIo = sc->PciIo;
  port = sc_if->msk_md.port;

  // Disable interrupts
  if (port == MSK_PORT_A) {
    sc->msk_intrmask &= ~Y2_IS_PORT_A;
    sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
  } else {
    sc->msk_intrmask &= ~Y2_IS_PORT_B;
    sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
  }
  CSR_WRITE_4 (sc, B0_HWE_IMSK, sc->msk_intrhwemask);
  CSR_READ_4 (sc, B0_HWE_IMSK);
  CSR_WRITE_4 (sc, B0_IMSK, sc->msk_intrmask);
  CSR_READ_4 (sc, B0_IMSK);

  // Disable Tx/Rx MAC.
  val = GMAC_READ_2 (sc, port, GM_GP_CTRL);
  val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
  GMAC_WRITE_2 (sc, port, GM_GP_CTRL, val);
  // Read again to ensure writing.
  GMAC_READ_2 (sc, port, GM_GP_CTRL);
  // Update stats and clear counters
  msk_stats_update (sc_if);

  // Stop Tx BMU
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_STOP);
  val = CSR_READ_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR));
  for (i = 0; i < MSK_TIMEOUT; i++) {
    if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
      CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_STOP);
      val = CSR_READ_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR));
    } else {
      break;
    }
    gBS->Stall (1);
  }
  if (i == MSK_TIMEOUT) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Tx BMU stop failed\n"));
  }
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD);

  // Disable all GMAC interrupts.
  CSR_WRITE_1 (sc, MR_ADDR (port, GMAC_IRQ_MSK), 0);
  // Disable PHY interrupts.
  msk_phy_writereg (sc_if, PHY_MARV_INT_MASK, 0);

  // Disable the RAM Interface Arbiter.
  CSR_WRITE_1 (sc, MR_ADDR (port, TXA_CTRL), TXA_DIS_ARB);

  // Reset the PCI FIFO of the async Tx queue
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

  // Reset the Tx prefetch units
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (sc_if->msk_txq, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET);

  // Reset the RAM Buffer async Tx queue
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_txq, RB_CTRL), RB_RST_SET);

  // Reset Tx MAC FIFO.
  CSR_WRITE_4 (sc, MR_ADDR (port, TX_GMF_CTRL_T), GMF_RST_SET);
  // Set Pause Off.
  CSR_WRITE_4 (sc, MR_ADDR (port, GMAC_CTRL), GMC_PAUSE_OFF);

  /*
   * The Rx Stop command will not work for Yukon-2 if the BMU does not
   * reach the end of packet and since we can't make sure that we have
   * incoming data, we must reset the BMU while it is not during a DMA
   * transfer. Since it is possible that the Rx path is still active,
   * the Rx RAM buffer will be stopped first, so any possible incoming
   * data will not trigger a DMA. After the RAM buffer is stopped, the
   * BMU is polled until any DMA in progress is ended and only then it
   * will be reset.
   */

  // Disable the RAM Buffer receive queue
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
  for (i = 0; i < MSK_TIMEOUT; i++) {
    if (CSR_READ_1 (sc, RB_ADDR (sc_if->msk_rxq, Q_RSL)) == CSR_READ_1 (sc, RB_ADDR (sc_if->msk_rxq, Q_RL))) {
      break;
    }
    gBS->Stall (1);
  }
  if (i == MSK_TIMEOUT) {
    DEBUG ((EFI_D_NET, "Marvell Yukon: Rx BMU stop failed\n"));
  }
  CSR_WRITE_4 (sc, Q_ADDR (sc_if->msk_rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
  // Reset the Rx prefetch unit.
  CSR_WRITE_4 (sc, Y2_PREF_Q_ADDR (sc_if->msk_rxq, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET);
  // Reset the RAM Buffer receive queue.
  CSR_WRITE_1 (sc, RB_ADDR (sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
  // Reset Rx MAC FIFO.
  CSR_WRITE_4 (sc, MR_ADDR (port, RX_GMF_CTRL_T), GMF_RST_SET);

  // Free Rx and Tx mbufs still in the queues
  for (i = 0; i < MSK_RX_RING_CNT; i++) {
    rxd = &sc_if->msk_cdata.msk_rxdesc[i];
    if (rxd->rx_m.Buf != NULL) {
      PciIo->Unmap (PciIo, rxd->rx_m.DmaMapping);
      gBS->FreePool (rxd->rx_m.Buf);
      rxd->rx_m.Buf = NULL;
      gBS->SetMem (&(rxd->rx_m), sizeof (MSK_DMA_BUF), 0);
    }
  }

  for (i = 0; i < MSK_TX_RING_CNT; i++) {
    txd = &sc_if->msk_cdata.msk_txdesc[i];
    if (txd->tx_m.Buf != NULL) {
      PciIo->Unmap (PciIo, txd->tx_m.DmaMapping);
      gBS->SetMem (&(txd->tx_m), sizeof (MSK_DMA_BUF), 0);
      // We don't own the transmit buffers so don't free them
    }
  }

  // Mark the interface down.
  sc_if->msk_flags &= ~MSK_FLAG_LINK;
}

/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
 * counter clears the high 16 bits of the counter, so accessing the
 * lower 16 bits must be the last operation.
 */
#define MSK_READ_MIB32(x, y) ((((UINT32)GMAC_READ_2 (sc, x, (y) + 4)) << 16) + (UINT32)GMAC_READ_2 (sc, x, y))
#define MSK_READ_MIB64(x, y) ((((UINT64)MSK_READ_MIB32 (x, (y) + 8)) << 32) + (UINT64)MSK_READ_MIB32 (x, y))
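//
// Example: a 32-bit MIB counter such as GM_RXF_UC_OK occupies two 16-bit
// registers; MSK_READ_MIB32 (port, GM_RXF_UC_OK) reads the high half at
// offset GM_RXF_UC_OK + 4 and the low half at GM_RXF_UC_OK, combining them
// as (high << 16) + low. MSK_READ_MIB64 applies the same scheme to two
// 32-bit halves located 8 bytes apart.
//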

static
VOID
msk_stats_clear (
  struct msk_if_softc *sc_if
  )
{
  UINT16 gmac;
  INTN val;
  INTN i;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;

  // Set MIB Clear Counter Mode.
  gmac = GMAC_READ_2 (sc, port, GM_PHY_ADDR);
  GMAC_WRITE_2 (sc, port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
  // Read all MIB Counters with Clear Mode set
  for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof (UINT32)) {
    val = MSK_READ_MIB32 (port, i);
    if (val); // Workaround to prevent the GCC error: 'value computed is not used'
  }
  // Clear MIB Clear Counter Mode
  gmac &= ~GM_PAR_MIB_CLR;
  GMAC_WRITE_2 (sc, port, GM_PHY_ADDR, gmac);
}

static
VOID
msk_stats_update (
  struct msk_if_softc *sc_if
  )
{
  struct msk_hw_stats *stats;
  UINT16 gmac;
  INTN val;
  INTN port;
  struct msk_softc *sc;

  sc = sc_if->msk_softc;
  port = sc_if->msk_md.port;
  stats = &sc_if->msk_stats;
  /* Set MIB Clear Counter Mode. */
  gmac = GMAC_READ_2 (sc, port, GM_PHY_ADDR);
  GMAC_WRITE_2 (sc, port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

  /* Rx stats. */
  stats->rx_ucast_frames += MSK_READ_MIB32 (port, GM_RXF_UC_OK);
  stats->rx_bcast_frames += MSK_READ_MIB32 (port, GM_RXF_BC_OK);
  stats->rx_pause_frames += MSK_READ_MIB32 (port, GM_RXF_MPAUSE);
  stats->rx_mcast_frames += MSK_READ_MIB32 (port, GM_RXF_MC_OK);
  stats->rx_crc_errs += MSK_READ_MIB32 (port, GM_RXF_FCS_ERR);
  val = MSK_READ_MIB32 (port, GM_RXF_SPARE1);
  stats->rx_good_octets += MSK_READ_MIB64 (port, GM_RXO_OK_LO);
  stats->rx_bad_octets += MSK_READ_MIB64 (port, GM_RXO_ERR_LO);
  stats->rx_runts += MSK_READ_MIB32 (port, GM_RXF_SHT);
  stats->rx_runt_errs += MSK_READ_MIB32 (port, GM_RXE_FRAG);
  stats->rx_pkts_64 += MSK_READ_MIB32 (port, GM_RXF_64B);
  stats->rx_pkts_65_127 += MSK_READ_MIB32 (port, GM_RXF_127B);
  stats->rx_pkts_128_255 += MSK_READ_MIB32 (port, GM_RXF_255B);
  stats->rx_pkts_256_511 += MSK_READ_MIB32 (port, GM_RXF_511B);
  stats->rx_pkts_512_1023 += MSK_READ_MIB32 (port, GM_RXF_1023B);
  stats->rx_pkts_1024_1518 += MSK_READ_MIB32 (port, GM_RXF_1518B);
  stats->rx_pkts_1519_max += MSK_READ_MIB32 (port, GM_RXF_MAX_SZ);
  stats->rx_pkts_too_long += MSK_READ_MIB32 (port, GM_RXF_LNG_ERR);
  stats->rx_pkts_jabbers += MSK_READ_MIB32 (port, GM_RXF_JAB_PKT);
  val = MSK_READ_MIB32 (port, GM_RXF_SPARE2);
  stats->rx_fifo_oflows += MSK_READ_MIB32 (port, GM_RXE_FIFO_OV);
  val = MSK_READ_MIB32 (port, GM_RXF_SPARE3);

  /* Tx stats. */
  stats->tx_ucast_frames += MSK_READ_MIB32 (port, GM_TXF_UC_OK);
  stats->tx_bcast_frames += MSK_READ_MIB32 (port, GM_TXF_BC_OK);
  stats->tx_pause_frames += MSK_READ_MIB32 (port, GM_TXF_MPAUSE);
  stats->tx_mcast_frames += MSK_READ_MIB32 (port, GM_TXF_MC_OK);
  stats->tx_octets += MSK_READ_MIB64 (port, GM_TXO_OK_LO);
  stats->tx_pkts_64 += MSK_READ_MIB32 (port, GM_TXF_64B);
  stats->tx_pkts_65_127 += MSK_READ_MIB32 (port, GM_TXF_127B);
  stats->tx_pkts_128_255 += MSK_READ_MIB32 (port, GM_TXF_255B);
  stats->tx_pkts_256_511 += MSK_READ_MIB32 (port, GM_TXF_511B);
  stats->tx_pkts_512_1023 += MSK_READ_MIB32 (port, GM_TXF_1023B);
  stats->tx_pkts_1024_1518 += MSK_READ_MIB32 (port, GM_TXF_1518B);
  stats->tx_pkts_1519_max += MSK_READ_MIB32 (port, GM_TXF_MAX_SZ);
  val = MSK_READ_MIB32 (port, GM_TXF_SPARE1);
  stats->tx_colls += MSK_READ_MIB32 (port, GM_TXF_COL);
  stats->tx_late_colls += MSK_READ_MIB32 (port, GM_TXF_LAT_COL);
  stats->tx_excess_colls += MSK_READ_MIB32 (port, GM_TXF_ABO_COL);
  stats->tx_multi_colls += MSK_READ_MIB32 (port, GM_TXF_MUL_COL);
  stats->tx_single_colls += MSK_READ_MIB32 (port, GM_TXF_SNG_COL);
  stats->tx_underflows += MSK_READ_MIB32 (port, GM_TXE_FIFO_UR);

  if (val); // Workaround to prevent the GCC error: 'value computed is not used'

  /* Clear MIB Clear Counter Mode. */
  gmac &= ~GM_PAR_MIB_CLR;
  GMAC_WRITE_2 (sc, port, GM_PHY_ADDR, gmac);
}

#undef MSK_READ_MIB32
#undef MSK_READ_MIB64