1 // SPDX-License-Identifier: GPL-2.0
2 /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
3 * auto carrier detecting ethernet driver. Also known as the
4 * "Happy Meal Ethernet" found on SunSwift SBUS cards.
5 *
6 * Copyright (C) 1996, 1998, 1999, 2002, 2003,
7 * 2006, 2008 David S. Miller (davem@davemloft.net)
8 *
9 * Changes :
10 * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
11 * - port to non-sparc architectures. Tested only on x86 and
12 * only currently works with QFE PCI cards.
13 * - ability to specify the MAC address at module load time by passing this
14 * argument: macaddr=0x00,0x10,0x20,0x30,0x40,0x50
15 */
16
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/types.h>
20 #include <linux/fcntl.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/in.h>
24 #include <linux/slab.h>
25 #include <linux/string.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/crc32.h>
31 #include <linux/random.h>
32 #include <linux/errno.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/mm.h>
37 #include <linux/bitops.h>
38 #include <linux/dma-mapping.h>
39
40 #include <asm/io.h>
41 #include <asm/dma.h>
42 #include <asm/byteorder.h>
43
44 #ifdef CONFIG_SPARC
45 #include <linux/of.h>
46 #include <linux/of_device.h>
47 #include <asm/idprom.h>
48 #include <asm/openprom.h>
49 #include <asm/oplib.h>
50 #include <asm/prom.h>
51 #include <asm/auxio.h>
52 #endif
53 #include <linux/uaccess.h>
54
55 #include <asm/irq.h>
56
57 #ifdef CONFIG_PCI
58 #include <linux/pci.h>
59 #endif
60
61 #include "sunhme.h"
62
63 #define DRV_NAME "sunhme"
64
65 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
66 MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
67 MODULE_LICENSE("GPL");
68
69 static int macaddr[6];
70
71 /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
72 module_param_array(macaddr, int, NULL, 0);
73 MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
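/* Illustrative sketch only, kept out of the build using the driver's own
 * "#if 0" convention: folding the signed-int macaddr[] module parameter
 * into a u8 MAC address.  The helper name is hypothetical; the probe path
 * presumably performs an equivalent copy.
 */
#if 0
static void hme_example_macaddr_to_u8(u8 *out)
{
	int i;

	/* Each element was parsed as an int; only the low byte is a MAC octet. */
	for (i = 0; i < 6; i++)
		out[i] = macaddr[i] & 0xff;
}
#endif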
74
75 #ifdef CONFIG_SBUS
76 static struct quattro *qfe_sbus_list;
77 #endif
78
79 #ifdef CONFIG_PCI
80 static struct quattro *qfe_pci_list;
81 #endif
82
83 #define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
84 #define HMD hme_debug
85
86 /* "Auto Switch Debug" aka phy debug */
87 #if 1
88 #define ASD hme_debug
89 #else
90 #define ASD(...)
91 #endif
92
93 #if 0
94 struct hme_tx_logent {
95 unsigned int tstamp;
96 int tx_new, tx_old;
97 unsigned int action;
98 #define TXLOG_ACTION_IRQ 0x01
99 #define TXLOG_ACTION_TXMIT 0x02
100 #define TXLOG_ACTION_TBUSY 0x04
101 #define TXLOG_ACTION_NBUFS 0x08
102 unsigned int status;
103 };
104 #define TX_LOG_LEN 128
105 static struct hme_tx_logent tx_log[TX_LOG_LEN];
106 static int txlog_cur_entry;
107 static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
108 {
109 struct hme_tx_logent *tlp;
110 unsigned long flags;
111
112 local_irq_save(flags);
113 tlp = &tx_log[txlog_cur_entry];
114 tlp->tstamp = (unsigned int)jiffies;
115 tlp->tx_new = hp->tx_new;
116 tlp->tx_old = hp->tx_old;
117 tlp->action = a;
118 tlp->status = s;
119 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
120 local_irq_restore(flags);
121 }
122 static __inline__ void tx_dump_log(void)
123 {
124 int i, this;
125
126 this = txlog_cur_entry;
127 for (i = 0; i < TX_LOG_LEN; i++) {
128 pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
129 tx_log[this].tstamp,
130 tx_log[this].tx_new, tx_log[this].tx_old,
131 tx_log[this].action, tx_log[this].status);
132 this = (this + 1) & (TX_LOG_LEN - 1);
133 }
134 }
135 #else
136 #define tx_add_log(hp, a, s)
137 #define tx_dump_log()
138 #endif
139
140 #define DEFAULT_IPG0 16 /* For lance-mode only */
141 #define DEFAULT_IPG1 8 /* For all modes */
142 #define DEFAULT_IPG2 4 /* For all modes */
143 #define DEFAULT_JAMSIZE 4 /* Toe jam */
144
145 /* NOTE: In the descriptor writes one _must_ write the address
146 * member _first_. The card must not be allowed to see
147 * the updated descriptor flags until the address is
148 * correct. I've added a write memory barrier between
149 * the two stores so that I can sleep well at night... -DaveM
150 */
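/* Minimal sketch of the rule above, kept out of the build ("#if 0" as the
 * driver does elsewhere): the DMA address must be globally visible before
 * the flags word that hands the descriptor to the card.  Byte-order
 * handling (SBUS vs. PCI) is exactly what the real accessors below add on
 * top of this.
 */
#if 0
static void example_post_rx_buffer(struct happy_meal_rxd *rxd, u32 mapping)
{
	rxd->rx_addr = (__force hme32)mapping;	/* 1) address first */
	dma_wmb();				/* 2) order the two stores */
	rxd->rx_flags = (__force hme32)(RXFLAG_OWN |	/* 3) then hand ownership to the card */
			((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));
}
#endif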
151
152 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
153 static void sbus_hme_write32(void __iomem *reg, u32 val)
154 {
155 sbus_writel(val, reg);
156 }
157
158 static u32 sbus_hme_read32(void __iomem *reg)
159 {
160 return sbus_readl(reg);
161 }
162
163 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
164 {
165 rxd->rx_addr = (__force hme32)addr;
166 dma_wmb();
167 rxd->rx_flags = (__force hme32)flags;
168 }
169
170 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
171 {
172 txd->tx_addr = (__force hme32)addr;
173 dma_wmb();
174 txd->tx_flags = (__force hme32)flags;
175 }
176
177 static u32 sbus_hme_read_desc32(hme32 *p)
178 {
179 return (__force u32)*p;
180 }
181
182 static void pci_hme_write32(void __iomem *reg, u32 val)
183 {
184 writel(val, reg);
185 }
186
187 static u32 pci_hme_read32(void __iomem *reg)
188 {
189 return readl(reg);
190 }
191
192 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
193 {
194 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
195 dma_wmb();
196 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
197 }
198
199 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
200 {
201 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
202 dma_wmb();
203 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
204 }
205
206 static u32 pci_hme_read_desc32(hme32 *p)
207 {
208 return le32_to_cpup((__le32 *)p);
209 }
210
211 #define hme_write32(__hp, __reg, __val) \
212 ((__hp)->write32((__reg), (__val)))
213 #define hme_read32(__hp, __reg) \
214 ((__hp)->read32(__reg))
215 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
216 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
217 #define hme_write_txd(__hp, __txd, __flags, __addr) \
218 ((__hp)->write_txd((__txd), (__flags), (__addr)))
219 #define hme_read_desc32(__hp, __p) \
220 ((__hp)->read_desc32(__p))
221 #else
222 #ifdef CONFIG_SBUS
223 /* SBUS only compilation */
224 #define hme_write32(__hp, __reg, __val) \
225 sbus_writel((__val), (__reg))
226 #define hme_read32(__hp, __reg) \
227 sbus_readl(__reg)
228 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
229 do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
230 dma_wmb(); \
231 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
232 } while(0)
233 #define hme_write_txd(__hp, __txd, __flags, __addr) \
234 do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
235 dma_wmb(); \
236 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
237 } while(0)
238 #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
239 #else
240 /* PCI only compilation */
241 #define hme_write32(__hp, __reg, __val) \
242 writel((__val), (__reg))
243 #define hme_read32(__hp, __reg) \
244 readl(__reg)
245 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
246 do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
247 dma_wmb(); \
248 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
249 } while(0)
250 #define hme_write_txd(__hp, __txd, __flags, __addr) \
251 do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
252 dma_wmb(); \
253 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
254 } while(0)
255 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
256 {
257 return le32_to_cpup((__le32 *)p);
258 }
259 #endif
260 #endif
261
262
263 /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
264 static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
265 {
266 hme_write32(hp, tregs + TCVR_BBDATA, bit);
267 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
268 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
269 }
270
271 #if 0
272 static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
273 {
274 u32 ret;
275
276 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
277 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
278 ret = hme_read32(hp, tregs + TCVR_CFG);
279 if (internal)
280 ret &= TCV_CFG_MDIO0;
281 else
282 ret &= TCV_CFG_MDIO1;
283
284 return ret;
285 }
286 #endif
287
288 static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
289 {
290 u32 retval;
291
292 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
293 udelay(1);
294 retval = hme_read32(hp, tregs + TCVR_CFG);
295 if (internal)
296 retval &= TCV_CFG_MDIO0;
297 else
298 retval &= TCV_CFG_MDIO1;
299 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
300
301 return retval;
302 }
303
304 #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
305
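/* The bit-bang helpers above clock out an IEEE 802.3 clause-22 management
 * frame by hand: a 32-bit preamble of ones, the start bits (01), the
 * opcode (10 = read, 01 = write), the 5-bit PHY address, the 5-bit
 * register address, a turnaround, and finally 16 data bits -- exactly the
 * sequence happy_meal_bb_read()/happy_meal_bb_write() below step through.
 */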
306 static int happy_meal_bb_read(struct happy_meal *hp,
307 void __iomem *tregs, int reg)
308 {
309 u32 tmp;
310 int retval = 0;
311 int i;
312
313 /* Enable the MIF BitBang outputs. */
314 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
315
316 /* Force BitBang into the idle state. */
317 for (i = 0; i < 32; i++)
318 BB_PUT_BIT(hp, tregs, 1);
319
320 /* Give it the read sequence. */
321 BB_PUT_BIT(hp, tregs, 0);
322 BB_PUT_BIT(hp, tregs, 1);
323 BB_PUT_BIT(hp, tregs, 1);
324 BB_PUT_BIT(hp, tregs, 0);
325
326 /* Give it the PHY address. */
327 tmp = hp->paddr & 0xff;
328 for (i = 4; i >= 0; i--)
329 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
330
331 /* Tell it what register we want to read. */
332 tmp = (reg & 0xff);
333 for (i = 4; i >= 0; i--)
334 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
335
336 /* Close down the MIF BitBang outputs. */
337 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
338
339 /* Now read in the value. */
340 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
341 for (i = 15; i >= 0; i--)
342 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
343 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
344 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
345 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
346 ASD("reg=%d value=%x\n", reg, retval);
347 return retval;
348 }
349
350 static void happy_meal_bb_write(struct happy_meal *hp,
351 void __iomem *tregs, int reg,
352 unsigned short value)
353 {
354 u32 tmp;
355 int i;
356
357 ASD("reg=%d value=%x\n", reg, value);
358
359 /* Enable the MIF BitBang outputs. */
360 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
361
362 /* Force BitBang into the idle state. */
363 for (i = 0; i < 32; i++)
364 BB_PUT_BIT(hp, tregs, 1);
365
366 /* Give it write sequence. */
367 BB_PUT_BIT(hp, tregs, 0);
368 BB_PUT_BIT(hp, tregs, 1);
369 BB_PUT_BIT(hp, tregs, 0);
370 BB_PUT_BIT(hp, tregs, 1);
371
372 /* Give it the PHY address. */
373 tmp = (hp->paddr & 0xff);
374 for (i = 4; i >= 0; i--)
375 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
376
377 /* Tell it what register we will be writing. */
378 tmp = (reg & 0xff);
379 for (i = 4; i >= 0; i--)
380 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
381
382 /* Tell it to become ready for the bits. */
383 BB_PUT_BIT(hp, tregs, 1);
384 BB_PUT_BIT(hp, tregs, 0);
385
386 for (i = 15; i >= 0; i--)
387 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
388
389 /* Close down the MIF BitBang outputs. */
390 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
391 }
392
393 #define TCVR_READ_TRIES 16
394
395 static int happy_meal_tcvr_read(struct happy_meal *hp,
396 void __iomem *tregs, int reg)
397 {
398 int tries = TCVR_READ_TRIES;
399 int retval;
400
401 if (hp->tcvr_type == none) {
402 ASD("no transceiver, value=TCVR_FAILURE\n");
403 return TCVR_FAILURE;
404 }
405
406 if (!(hp->happy_flags & HFLAG_FENABLE)) {
407 ASD("doing bit bang\n");
408 return happy_meal_bb_read(hp, tregs, reg);
409 }
410
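	/* MIF frame register layout, as used here: PHY address in bits 27:23,
	 * register number in bits 22:18; bit 16 reads back set once the frame
	 * has completed (that is what the 0x10000 poll below waits for).
	 */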
411 hme_write32(hp, tregs + TCVR_FRAME,
412 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
413 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
414 udelay(20);
415 if (!tries) {
416 netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
417 return TCVR_FAILURE;
418 }
419 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
420 ASD("reg=0x%02x value=%04x\n", reg, retval);
421 return retval;
422 }
423
424 #define TCVR_WRITE_TRIES 16
425
426 static void happy_meal_tcvr_write(struct happy_meal *hp,
427 void __iomem *tregs, int reg,
428 unsigned short value)
429 {
430 int tries = TCVR_WRITE_TRIES;
431
432 ASD("reg=0x%02x value=%04x\n", reg, value);
433
434 /* Welcome to Sun Microsystems, can I take your order please? */
435 if (!(hp->happy_flags & HFLAG_FENABLE)) {
436 happy_meal_bb_write(hp, tregs, reg, value);
437 return;
438 }
439
440 /* Would you like fries with that? */
441 hme_write32(hp, tregs + TCVR_FRAME,
442 (FRAME_WRITE | (hp->paddr << 23) |
443 ((reg & 0xff) << 18) | (value & 0xffff)));
444 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
445 udelay(20);
446
447 /* Anything else? */
448 if (!tries)
449 netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
450
451 /* Fifty-two cents is your change, have a nice day. */
452 }
453
454 /* Auto negotiation. The scheme is very simple. We have a timer routine
455 * that keeps watching the auto negotiation process as it progresses.
456 * The DP83840 is first told to start doing its thing, then we set up the
457 * timer and place the timer state machine in its initial state.
458 *
459 * Here the timer peeks at the DP83840 status registers at each tick to see
460 * if the auto negotiation has completed. We assume here that the DP83840 PHY
461 * will time out at some point and just tell us what (didn't) happen. For
462 * complete coverage we only allow so many of the ticks at this level to run;
463 * when this has expired we print a warning message and try another strategy.
464 * This "other" strategy is to force the interface into various speed/duplex
465 * configurations and we stop when we see a link-up condition before the
466 * maximum number of "peek" ticks have occurred.
467 *
468 * Once a valid link status has been detected we configure the BigMAC and
469 * the rest of the Happy Meal to speak the most efficient protocol we could
470 * get a clean link for. The priority for link configurations, highest first
471 * is:
472 * 100 Base-T Full Duplex
473 * 100 Base-T Half Duplex
474 * 10 Base-T Full Duplex
475 * 10 Base-T Half Duplex
476 *
477 * We start a new timer now, after a successful auto negotiation status has
478 * been detected. This timer just waits for the link-up bit to get set in
479 * the BMCR of the DP83840. When this occurs we print a kernel log message
480 * describing the link type in use and the fact that it is up.
481 *
482 * If a fatal error of some sort is signalled and detected in the interrupt
483 * service routine, and the chip is reset, or the link is ifconfig'd down
484 * and then back up, this entire process repeats itself all over again.
485 */
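/* A compact sketch of the priority resolution described above, kept out of
 * the build per the driver's "#if 0" convention.  It maps the MII_LPA bits
 * to the speed/duplex order listed in the comment, much as
 * set_happy_link_modes() below does when deciding the BigMAC duplex setting.
 */
#if 0
static void example_resolve_link(u16 lpa, int *speed, int *full_duplex)
{
	if (lpa & LPA_100FULL) {
		*speed = 100;
		*full_duplex = 1;
	} else if (lpa & LPA_100HALF) {
		*speed = 100;
		*full_duplex = 0;
	} else if (lpa & LPA_10FULL) {
		*speed = 10;
		*full_duplex = 1;
	} else {
		*speed = 10;
		*full_duplex = 0;
	}
}
#endif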
486 static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
487 {
488 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
489
490 /* Downgrade from full to half duplex. Only possible
491 * via ethtool.
492 */
493 if (hp->sw_bmcr & BMCR_FULLDPLX) {
494 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
495 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
496 return 0;
497 }
498
499 /* Downgrade from 100 to 10. */
500 if (hp->sw_bmcr & BMCR_SPEED100) {
501 hp->sw_bmcr &= ~(BMCR_SPEED100);
502 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
503 return 0;
504 }
505
506 /* We've tried everything. */
507 return -1;
508 }
509
510 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
511 {
512 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
513
514 netdev_info(hp->dev,
515 "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
516 hp->tcvr_type == external ? "external" : "internal",
517 hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
518 hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
519 }
520
521 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
522 {
523 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
524
525 netdev_info(hp->dev,
526 "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
527 hp->tcvr_type == external ? "external" : "internal",
528 hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
529 hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
530 }
531
532 static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
533 {
534 int full;
535
536 /* All we care about is making sure the bigmac tx_cfg has a
537 * proper duplex setting.
538 */
539 if (hp->timer_state == arbwait) {
540 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
541 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
542 goto no_response;
543 if (hp->sw_lpa & LPA_100FULL)
544 full = 1;
545 else if (hp->sw_lpa & LPA_100HALF)
546 full = 0;
547 else if (hp->sw_lpa & LPA_10FULL)
548 full = 1;
549 else
550 full = 0;
551 } else {
552 /* Forcing a link mode. */
553 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
554 if (hp->sw_bmcr & BMCR_FULLDPLX)
555 full = 1;
556 else
557 full = 0;
558 }
559
560 /* Before changing other bits in the tx_cfg register, and in
561 * general any of other the TX config registers too, you
562 * must:
563 * 1) Clear Enable
564 * 2) Poll with reads until that bit reads back as zero
565 * 3) Make TX configuration changes
566 * 4) Set Enable once more
567 */
568 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
569 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
570 ~(BIGMAC_TXCFG_ENABLE));
571 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
572 barrier();
573 if (full) {
574 hp->happy_flags |= HFLAG_FULL;
575 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
576 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
577 BIGMAC_TXCFG_FULLDPLX);
578 } else {
579 hp->happy_flags &= ~(HFLAG_FULL);
580 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
581 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
582 ~(BIGMAC_TXCFG_FULLDPLX));
583 }
584 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
585 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
586 BIGMAC_TXCFG_ENABLE);
587 return 0;
588 no_response:
589 return 1;
590 }
591
592 static int happy_meal_init(struct happy_meal *hp);
593
594 static int is_lucent_phy(struct happy_meal *hp)
595 {
596 void __iomem *tregs = hp->tcvregs;
597 unsigned short mr2, mr3;
598 int ret = 0;
599
600 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
601 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
602 if ((mr2 & 0xffff) == 0x0180 &&
603 ((mr3 & 0xffff) >> 10) == 0x1d)
604 ret = 1;
605
606 return ret;
607 }
608
609 static void happy_meal_timer(struct timer_list *t)
610 {
611 struct happy_meal *hp = from_timer(hp, t, happy_timer);
612 void __iomem *tregs = hp->tcvregs;
613 int restart_timer = 0;
614
615 spin_lock_irq(&hp->happy_lock);
616
617 hp->timer_ticks++;
618 switch(hp->timer_state) {
619 case arbwait:
620 /* Only allow for 10 ticks (about 12 seconds); that's already much
621 * too long to wait for arbitration to complete.
622 */
623 if (hp->timer_ticks >= 10) {
624 /* Enter force mode. */
625 do_force_mode:
626 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
627 netdev_notice(hp->dev,
628 "Auto-Negotiation unsuccessful, trying force link mode\n");
629 hp->sw_bmcr = BMCR_SPEED100;
630 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
631
632 if (!is_lucent_phy(hp)) {
633 /* OK, seems we need to disable the transceiver for the first
634 * tick to make sure we get an accurate link state at the
635 * second tick.
636 */
637 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
638 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
639 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
640 }
641 hp->timer_state = ltrywait;
642 hp->timer_ticks = 0;
643 restart_timer = 1;
644 } else {
645 /* Anything interesting happen? */
646 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
647 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
648 int ret;
649
650 /* Just what we've been waiting for... */
651 ret = set_happy_link_modes(hp, tregs);
652 if (ret) {
653 /* Ooops, something bad happened, go to force
654 * mode.
655 *
656 * XXX Broken hubs which don't support 802.3u
657 * XXX auto-negotiation make this happen as well.
658 */
659 goto do_force_mode;
660 }
661
662 /* Success, at least so far, advance our state engine. */
663 hp->timer_state = lupwait;
664 restart_timer = 1;
665 } else {
666 restart_timer = 1;
667 }
668 }
669 break;
670
671 case lupwait:
672 /* Auto negotiation was successful and we are awaiting a
673 * link up status. I have decided to let this timer run
674 * forever until some sort of error is signalled, reporting
675 * a message to the user at 10 second intervals.
676 */
677 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
678 if (hp->sw_bmsr & BMSR_LSTATUS) {
679 /* Wheee, it's up, display the link mode in use and put
680 * the timer to sleep.
681 */
682 display_link_mode(hp, tregs);
683 hp->timer_state = asleep;
684 restart_timer = 0;
685 } else {
686 if (hp->timer_ticks >= 10) {
687 netdev_notice(hp->dev,
688 "Auto negotiation successful, link still not completely up.\n");
689 hp->timer_ticks = 0;
690 restart_timer = 1;
691 } else {
692 restart_timer = 1;
693 }
694 }
695 break;
696
697 case ltrywait:
698 /* Making the timeout here too long can make it take
699 * annoyingly long to attempt all of the link mode
700 * permutations, but then again this is essentially
701 * error recovery code for the most part.
702 */
703 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
704 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
705 if (hp->timer_ticks == 1) {
706 if (!is_lucent_phy(hp)) {
707 /* Disable the transceiver for this tick; we'll re-enable it next
708 * tick, then check link state on the following tick.
709 */
710 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
711 happy_meal_tcvr_write(hp, tregs,
712 DP83840_CSCONFIG, hp->sw_csconfig);
713 }
714 restart_timer = 1;
715 break;
716 }
717 if (hp->timer_ticks == 2) {
718 if (!is_lucent_phy(hp)) {
719 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
720 happy_meal_tcvr_write(hp, tregs,
721 DP83840_CSCONFIG, hp->sw_csconfig);
722 }
723 restart_timer = 1;
724 break;
725 }
726 if (hp->sw_bmsr & BMSR_LSTATUS) {
727 /* Force mode selection success. */
728 display_forced_link_mode(hp, tregs);
729 set_happy_link_modes(hp, tregs); /* XXX error? then what? */
730 hp->timer_state = asleep;
731 restart_timer = 0;
732 } else {
733 if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
734 int ret;
735
736 ret = try_next_permutation(hp, tregs);
737 if (ret == -1) {
738 /* Aieee, tried them all, reset the
739 * chip and try all over again.
740 */
741
742 /* Let the user know... */
743 netdev_notice(hp->dev,
744 "Link down, cable problem?\n");
745
746 ret = happy_meal_init(hp);
747 if (ret) {
748 /* ho hum... */
749 netdev_err(hp->dev,
750 "Error, cannot re-init the Happy Meal.\n");
751 }
752 goto out;
753 }
754 if (!is_lucent_phy(hp)) {
755 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
756 DP83840_CSCONFIG);
757 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
758 happy_meal_tcvr_write(hp, tregs,
759 DP83840_CSCONFIG, hp->sw_csconfig);
760 }
761 hp->timer_ticks = 0;
762 restart_timer = 1;
763 } else {
764 restart_timer = 1;
765 }
766 }
767 break;
768
769 case asleep:
770 default:
771 /* Can't happen.... */
772 netdev_err(hp->dev,
773 "Aieee, link timer is asleep but we got one anyways!\n");
774 restart_timer = 0;
775 hp->timer_ticks = 0;
776 hp->timer_state = asleep; /* foo on you */
777 break;
778 }
779
780 if (restart_timer) {
781 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
782 add_timer(&hp->happy_timer);
783 }
784
785 out:
786 spin_unlock_irq(&hp->happy_lock);
787 }
788
789 #define TX_RESET_TRIES 32
790 #define RX_RESET_TRIES 32
791
792 /* hp->happy_lock must be held */
793 static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
794 {
795 int tries = TX_RESET_TRIES;
796
797 HMD("reset...\n");
798
799 /* Would you like to try our SMCC Delux? */
800 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
801 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
802 udelay(20);
803
804 /* Lettuce, tomato, buggy hardware (no extra charge)? */
805 if (!tries)
806 netdev_err(hp->dev, "Transceiver BigMac ATTACK!\n");
807
808 /* Take care. */
809 HMD("done\n");
810 }
811
812 /* hp->happy_lock must be held */
813 static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
814 {
815 int tries = RX_RESET_TRIES;
816
817 HMD("reset...\n");
818
819 /* We have a special on GNU/Viking hardware bugs today. */
820 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
821 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
822 udelay(20);
823
824 /* Will that be all? */
825 if (!tries)
826 netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
827
828 /* Don't forget your vik_1137125_wa. Have a nice day. */
829 HMD("done\n");
830 }
831
832 #define STOP_TRIES 16
833
834 /* hp->happy_lock must be held */
835 static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
836 {
837 int tries = STOP_TRIES;
838
839 HMD("reset...\n");
840
841 /* We're consolidating our STB products, it's your lucky day. */
842 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
843 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
844 udelay(20);
845
846 /* Come back next week when we are "Sun Microelectronics". */
847 if (!tries)
848 netdev_err(hp->dev, "Fry guys.\n");
849
850 /* Remember: "Different name, same old buggy as shit hardware." */
851 HMD("done\n");
852 }
853
854 /* hp->happy_lock must be held */
855 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
856 {
857 struct net_device_stats *stats = &hp->dev->stats;
858
859 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
860 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
861
862 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
863 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
864
865 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
866 hme_write32(hp, bregs + BMAC_GLECTR, 0);
867
868 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
869
870 stats->collisions +=
871 (hme_read32(hp, bregs + BMAC_EXCTR) +
872 hme_read32(hp, bregs + BMAC_LTCTR));
873 hme_write32(hp, bregs + BMAC_EXCTR, 0);
874 hme_write32(hp, bregs + BMAC_LTCTR, 0);
875 }
876
877 /* hp->happy_lock must be held */
878 static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
879 {
880 /* If polling disabled or not polling already, nothing to do. */
881 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
882 (HFLAG_POLLENABLE | HFLAG_POLL)) {
883 ASD("not polling, return\n");
884 return;
885 }
886
887 /* Shut up the MIF. */
888 ASD("were polling, mif ints off, polling off\n");
889 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
890
891 /* Turn off polling. */
892 hme_write32(hp, tregs + TCVR_CFG,
893 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
894
895 /* We are no longer polling. */
896 hp->happy_flags &= ~(HFLAG_POLL);
897
898 /* Let the bits set. */
899 udelay(200);
900 ASD("done\n");
901 }
902
903 /* Only Sun can take such nice parts and fuck up the programming interface
904 * like this. Good job guys...
905 */
906 #define TCVR_RESET_TRIES 16 /* It should reset quickly */
907 #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
908
909 /* hp->happy_lock must be held */
910 static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
911 {
912 u32 tconfig;
913 int result, tries = TCVR_RESET_TRIES;
914
915 tconfig = hme_read32(hp, tregs + TCVR_CFG);
916 ASD("tcfg=%08x\n", tconfig);
917 if (hp->tcvr_type == external) {
918 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
919 hp->tcvr_type = internal;
920 hp->paddr = TCV_PADDR_ITX;
921 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
922 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
923 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
924 if (result == TCVR_FAILURE) {
925 ASD("phyread_fail\n");
926 return -1;
927 }
928 ASD("external: ISOLATE, phyread_ok, PSELECT\n");
929 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
930 hp->tcvr_type = external;
931 hp->paddr = TCV_PADDR_ETX;
932 } else {
933 if (tconfig & TCV_CFG_MDIO1) {
934 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
935 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
936 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
937 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
938 if (result == TCVR_FAILURE) {
939 ASD("phyread_fail>\n");
940 return -1;
941 }
942 ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
943 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
944 hp->tcvr_type = internal;
945 hp->paddr = TCV_PADDR_ITX;
946 }
947 }
948
949 ASD("BMCR_RESET...\n");
950 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
951
952 while (--tries) {
953 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
954 if (result == TCVR_FAILURE)
955 return -1;
956 hp->sw_bmcr = result;
957 if (!(result & BMCR_RESET))
958 break;
959 udelay(20);
960 }
961 if (!tries) {
962 ASD("BMCR RESET FAILED!\n");
963 return -1;
964 }
965 ASD("RESET_OK\n");
966
967 /* Get fresh copies of the PHY registers. */
968 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
969 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
970 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
971 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
972
973 ASD("UNISOLATE...\n");
974 hp->sw_bmcr &= ~(BMCR_ISOLATE);
975 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
976
977 tries = TCVR_UNISOLATE_TRIES;
978 while (--tries) {
979 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
980 if (result == TCVR_FAILURE)
981 return -1;
982 if (!(result & BMCR_ISOLATE))
983 break;
984 udelay(20);
985 }
986 if (!tries) {
987 ASD("UNISOLATE FAILED!\n");
988 return -1;
989 }
990 ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
991 if (!is_lucent_phy(hp)) {
992 result = happy_meal_tcvr_read(hp, tregs,
993 DP83840_CSCONFIG);
994 happy_meal_tcvr_write(hp, tregs,
995 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
996 }
997 return 0;
998 }
999
1000 /* Figure out whether we have an internal or external transceiver.
1001 *
1002 * hp->happy_lock must be held
1003 */
1004 static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1005 {
1006 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1007
1008 ASD("tcfg=%08lx\n", tconfig);
1009 if (hp->happy_flags & HFLAG_POLL) {
1010 /* If we are polling, we must stop to get the transceiver type. */
1011 if (hp->tcvr_type == internal) {
1012 if (tconfig & TCV_CFG_MDIO1) {
1013 happy_meal_poll_stop(hp, tregs);
1014 hp->paddr = TCV_PADDR_ETX;
1015 hp->tcvr_type = external;
1016 tconfig &= ~(TCV_CFG_PENABLE);
1017 tconfig |= TCV_CFG_PSELECT;
1018 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1019 ASD("poll stop, internal->external\n");
1020 }
1021 } else {
1022 if (hp->tcvr_type == external) {
1023 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1024 happy_meal_poll_stop(hp, tregs);
1025 hp->paddr = TCV_PADDR_ITX;
1026 hp->tcvr_type = internal;
1027 hme_write32(hp, tregs + TCVR_CFG,
1028 hme_read32(hp, tregs + TCVR_CFG) &
1029 ~(TCV_CFG_PSELECT));
1030 ASD("poll stop, external->internal\n");
1031 }
1032 } else {
1033 ASD("polling, none\n");
1034 }
1035 }
1036 } else {
1037 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1038
1039 /* Else we can just work off of the MDIO bits. */
1040 if (reread & TCV_CFG_MDIO1) {
1041 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1042 hp->paddr = TCV_PADDR_ETX;
1043 hp->tcvr_type = external;
1044 ASD("not polling, external\n");
1045 } else {
1046 if (reread & TCV_CFG_MDIO0) {
1047 hme_write32(hp, tregs + TCVR_CFG,
1048 tconfig & ~(TCV_CFG_PSELECT));
1049 hp->paddr = TCV_PADDR_ITX;
1050 hp->tcvr_type = internal;
1051 ASD("not polling, internal\n");
1052 } else {
1053 netdev_err(hp->dev,
1054 "Transceiver and a coke please.\n");
1055 hp->tcvr_type = none; /* Grrr... */
1056 ASD("not polling, none\n");
1057 }
1058 }
1059 }
1060 }
1061
1062 /* The receive ring buffers are a bit tricky to get right. Here goes...
1063 *
1064 * The buffers we dma into must be 64 byte aligned. So we use a special
1065 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1066 * we really need.
1067 *
1068 * We use skb_reserve() to align the data block we get in the skb. We
1069 * also program the erxregs ERX_CFG register to use an offset of 2. This
1070 * empirical constant plus the ethernet header size will always leave
1071 * us with a nicely aligned ip header once we pass things up to the
1072 * protocol layers.
1073 *
1074 * The numbers work out to:
1075 *
1076 * Max ethernet frame size 1518
1077 * Ethernet header size 14
1078 * Happy Meal base offset 2
1079 *
1080 * Say a skb data area is at 0xf001b010, and its size alloced is
1081 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1082 *
1083 * First our alloc_skb() routine aligns the data base to a 64 byte
1084 * boundary. We now have 0xf001b040 as our skb data address. We
1085 * plug this into the receive descriptor address.
1086 *
1087 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1088 * So now the data we will end up looking at starts at 0xf001b042. When
1089 * the packet arrives, we will check out the size received and subtract
1090 * this from the skb->length. Then we just pass the packet up to the
1091 * protocols as is, and allocate a new skb to replace this slot we have
1092 * just received from.
1093 *
1094 * The ethernet layer will strip the ether header from the front of the
1095 * skb we just sent to it, this leaves us with the ip header sitting
1096 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
1097 * Happy Meal has even checksummed the tcp/udp data for us. The 16
1098 * bit checksum is obtained from the low bits of the receive descriptor
1099 * flags, thus:
1100 *
1101 * skb->csum = rxd->rx_flags & 0xffff;
1102 * skb->ip_summed = CHECKSUM_COMPLETE;
1103 *
1104 * before sending off the skb to the protocols, and we are good as gold.
1105 */
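/* Illustrative sketch (not built) of the hand-off the comment above
 * describes: the low 16 bits of the receive descriptor flags become
 * skb->csum and the packet is passed up with CHECKSUM_COMPLETE.  Length,
 * error and copy-break handling are left to the real receive path.
 */
#if 0
static void example_rx_pass_up(struct happy_meal *hp, struct sk_buff *skb,
			       u32 rx_flags)
{
	skb->csum = (__force __wsum)(rx_flags & 0xffff);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->protocol = eth_type_trans(skb, hp->dev);
	netif_rx(skb);
}
#endif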
1106 static void happy_meal_clean_rings(struct happy_meal *hp)
1107 {
1108 int i;
1109
1110 for (i = 0; i < RX_RING_SIZE; i++) {
1111 if (hp->rx_skbs[i] != NULL) {
1112 struct sk_buff *skb = hp->rx_skbs[i];
1113 struct happy_meal_rxd *rxd;
1114 u32 dma_addr;
1115
1116 rxd = &hp->happy_block->happy_meal_rxd[i];
1117 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1118 dma_unmap_single(hp->dma_dev, dma_addr,
1119 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1120 dev_kfree_skb_any(skb);
1121 hp->rx_skbs[i] = NULL;
1122 }
1123 }
1124
1125 for (i = 0; i < TX_RING_SIZE; i++) {
1126 if (hp->tx_skbs[i] != NULL) {
1127 struct sk_buff *skb = hp->tx_skbs[i];
1128 struct happy_meal_txd *txd;
1129 u32 dma_addr;
1130 int frag;
1131
1132 hp->tx_skbs[i] = NULL;
1133
1134 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1135 txd = &hp->happy_block->happy_meal_txd[i];
1136 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1137 if (!frag)
1138 dma_unmap_single(hp->dma_dev, dma_addr,
1139 (hme_read_desc32(hp, &txd->tx_flags)
1140 & TXFLAG_SIZE),
1141 DMA_TO_DEVICE);
1142 else
1143 dma_unmap_page(hp->dma_dev, dma_addr,
1144 (hme_read_desc32(hp, &txd->tx_flags)
1145 & TXFLAG_SIZE),
1146 DMA_TO_DEVICE);
1147
1148 if (frag != skb_shinfo(skb)->nr_frags)
1149 i++;
1150 }
1151
1152 dev_kfree_skb_any(skb);
1153 }
1154 }
1155 }
1156
1157 /* hp->happy_lock must be held */
1158 static void happy_meal_init_rings(struct happy_meal *hp)
1159 {
1160 struct hmeal_init_block *hb = hp->happy_block;
1161 int i;
1162
1163 HMD("counters to zero\n");
1164 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1165
1166 /* Free any skippy bufs left around in the rings. */
1167 happy_meal_clean_rings(hp);
1168
1169 /* Now get new skippy bufs for the receive ring. */
1170 HMD("init rxring\n");
1171 for (i = 0; i < RX_RING_SIZE; i++) {
1172 struct sk_buff *skb;
1173 u32 mapping;
1174
1175 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1176 if (!skb) {
1177 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1178 continue;
1179 }
1180 hp->rx_skbs[i] = skb;
1181
1182 /* Because we reserve afterwards. */
1183 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1184 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1185 DMA_FROM_DEVICE);
1186 if (dma_mapping_error(hp->dma_dev, mapping)) {
1187 dev_kfree_skb_any(skb);
1188 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1189 continue;
1190 }
1191 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1192 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1193 mapping);
1194 skb_reserve(skb, RX_OFFSET);
1195 }
1196
1197 HMD("init txring\n");
1198 for (i = 0; i < TX_RING_SIZE; i++)
1199 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1200
1201 HMD("done\n");
1202 }
1203
1204 /* hp->happy_lock must be held */
1205 static void
1206 happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1207 void __iomem *tregs,
1208 const struct ethtool_link_ksettings *ep)
1209 {
1210 int timeout;
1211
1212 /* Read all of the registers we are interested in now. */
1213 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1214 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1215 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1216 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1217
1218 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
1219
1220 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1221 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1222 /* Advertise everything we can support. */
1223 if (hp->sw_bmsr & BMSR_10HALF)
1224 hp->sw_advertise |= (ADVERTISE_10HALF);
1225 else
1226 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1227
1228 if (hp->sw_bmsr & BMSR_10FULL)
1229 hp->sw_advertise |= (ADVERTISE_10FULL);
1230 else
1231 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1232 if (hp->sw_bmsr & BMSR_100HALF)
1233 hp->sw_advertise |= (ADVERTISE_100HALF);
1234 else
1235 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1236 if (hp->sw_bmsr & BMSR_100FULL)
1237 hp->sw_advertise |= (ADVERTISE_100FULL);
1238 else
1239 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1240 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1241
1242 /* XXX Currently no Happy Meal cards I know of support 100BaseT4,
1243 * XXX and this is because the DP83840 does not support it, changes
1244 * XXX would need to be made to the tx/rx logic in the driver as well
1245 * XXX so I completely skip checking for it in the BMSR for now.
1246 */
1247
1248 ASD("Advertising [ %s%s%s%s]\n",
1249 hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
1250 hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
1251 hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
1252 hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
1253
1254 /* Enable Auto-Negotiation, this is usually on already... */
1255 hp->sw_bmcr |= BMCR_ANENABLE;
1256 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1257
1258 /* Restart it to make sure it is going. */
1259 hp->sw_bmcr |= BMCR_ANRESTART;
1260 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1261
1262 /* BMCR_ANRESTART self clears when the process has begun. */
1263
1264 timeout = 64; /* More than enough. */
1265 while (--timeout) {
1266 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1267 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1268 break; /* got it. */
1269 udelay(10);
1270 }
1271 if (!timeout) {
1272 netdev_err(hp->dev,
1273 "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
1274 hp->sw_bmcr);
1275 netdev_notice(hp->dev,
1276 "Performing force link detection.\n");
1277 goto force_link;
1278 } else {
1279 hp->timer_state = arbwait;
1280 }
1281 } else {
1282 force_link:
1283 /* Force the link up, trying first a particular mode.
1284 * Either we are here at the request of ethtool or
1285 * because the Happy Meal would not start to autoneg.
1286 */
1287
1288 /* Disable auto-negotiation in BMCR, enable the duplex and
1289 * speed setting, init the timer state machine, and fire it off.
1290 */
1291 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1292 hp->sw_bmcr = BMCR_SPEED100;
1293 } else {
1294 if (ep->base.speed == SPEED_100)
1295 hp->sw_bmcr = BMCR_SPEED100;
1296 else
1297 hp->sw_bmcr = 0;
1298 if (ep->base.duplex == DUPLEX_FULL)
1299 hp->sw_bmcr |= BMCR_FULLDPLX;
1300 }
1301 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1302
1303 if (!is_lucent_phy(hp)) {
1304 /* OK, seems we need to disable the transceiver for the first
1305 * tick to make sure we get an accurate link state at the
1306 * second tick.
1307 */
1308 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1309 DP83840_CSCONFIG);
1310 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1311 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1312 hp->sw_csconfig);
1313 }
1314 hp->timer_state = ltrywait;
1315 }
1316
1317 hp->timer_ticks = 0;
1318 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
1319 add_timer(&hp->happy_timer);
1320 }
1321
1322 /* hp->happy_lock must be held */
1323 static int happy_meal_init(struct happy_meal *hp)
1324 {
1325 const unsigned char *e = &hp->dev->dev_addr[0];
1326 void __iomem *gregs = hp->gregs;
1327 void __iomem *etxregs = hp->etxregs;
1328 void __iomem *erxregs = hp->erxregs;
1329 void __iomem *bregs = hp->bigmacregs;
1330 void __iomem *tregs = hp->tcvregs;
1331 const char *bursts = "64";
1332 u32 regtmp, rxcfg;
1333
1334 /* If auto-negotiation timer is running, kill it. */
1335 del_timer(&hp->happy_timer);
1336
1337 HMD("happy_flags[%08x]\n", hp->happy_flags);
1338 if (!(hp->happy_flags & HFLAG_INIT)) {
1339 HMD("set HFLAG_INIT\n");
1340 hp->happy_flags |= HFLAG_INIT;
1341 happy_meal_get_counters(hp, bregs);
1342 }
1343
1344 /* Stop polling. */
1345 HMD("to happy_meal_poll_stop\n");
1346 happy_meal_poll_stop(hp, tregs);
1347
1348 /* Stop transmitter and receiver. */
1349 HMD("to happy_meal_stop\n");
1350 happy_meal_stop(hp, gregs);
1351
1352 /* Alloc and reset the tx/rx descriptor chains. */
1353 HMD("to happy_meal_init_rings\n");
1354 happy_meal_init_rings(hp);
1355
1356 /* Shut up the MIF. */
1357 HMD("Disable all MIF irqs (old[%08x])\n",
1358 hme_read32(hp, tregs + TCVR_IMASK));
1359 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1360
1361 /* See if we can enable the MIF frame on this card to speak to the DP83840. */
1362 if (hp->happy_flags & HFLAG_FENABLE) {
1363 HMD("use frame old[%08x]\n",
1364 hme_read32(hp, tregs + TCVR_CFG));
1365 hme_write32(hp, tregs + TCVR_CFG,
1366 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1367 } else {
1368 HMD("use bitbang old[%08x]\n",
1369 hme_read32(hp, tregs + TCVR_CFG));
1370 hme_write32(hp, tregs + TCVR_CFG,
1371 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1372 }
1373
1374 /* Check the state of the transceiver. */
1375 HMD("to happy_meal_transceiver_check\n");
1376 happy_meal_transceiver_check(hp, tregs);
1377
1378 /* Put the Big Mac into a sane state. */
1379 switch(hp->tcvr_type) {
1380 case none:
1381 /* Cannot operate if we don't know the transceiver type! */
1382 HMD("AAIEEE no transceiver type, EAGAIN\n");
1383 return -EAGAIN;
1384
1385 case internal:
1386 /* Using the MII buffers. */
1387 HMD("internal, using MII\n");
1388 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1389 break;
1390
1391 case external:
1392 /* Not using the MII, disable it. */
1393 HMD("external, disable MII\n");
1394 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1395 break;
1396 }
1397
1398 if (happy_meal_tcvr_reset(hp, tregs))
1399 return -EAGAIN;
1400
1401 /* Reset the Happy Meal Big Mac transceiver and the receiver. */
1402 HMD("tx/rx reset\n");
1403 happy_meal_tx_reset(hp, bregs);
1404 happy_meal_rx_reset(hp, bregs);
1405
1406 /* Set jam size and inter-packet gaps to reasonable defaults. */
1407 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1408 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1409 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1410
1411 /* Load up the MAC address and random seed. */
1412
1413 /* The docs recommend using the 10 LSBs of our MAC here. */
1414 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1415
1416 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1417 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1418 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1419
1420 if ((hp->dev->flags & IFF_ALLMULTI) ||
1421 (netdev_mc_count(hp->dev) > 64)) {
1422 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1423 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1424 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1425 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1426 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1427 u16 hash_table[4];
1428 struct netdev_hw_addr *ha;
1429 u32 crc;
1430
1431 memset(hash_table, 0, sizeof(hash_table));
1432 netdev_for_each_mc_addr(ha, hp->dev) {
1433 crc = ether_crc_le(6, ha->addr);
1434 crc >>= 26;
1435 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1436 }
1437 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1438 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1439 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1440 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1441 } else {
1442 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1443 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1444 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1445 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1446 }
1447
1448 /* Set the RX and TX ring ptrs. */
1449 HMD("ring ptrs rxr[%08x] txr[%08x]\n",
1450 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1451 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1452 hme_write32(hp, erxregs + ERX_RING,
1453 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1454 hme_write32(hp, etxregs + ETX_RING,
1455 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1456
1457 /* Parity issues in the ERX unit of some HME revisions can cause some
1458 * registers to not be written unless their parity is even. Detect such
1459 * lost writes and simply rewrite with a low bit set (which will be ignored
1460 * since the rxring needs to be 2K aligned).
1461 */
1462 if (hme_read32(hp, erxregs + ERX_RING) !=
1463 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1464 hme_write32(hp, erxregs + ERX_RING,
1465 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1466 | 0x4);
1467
1468 /* Set the supported burst sizes. */
1469 #ifndef CONFIG_SPARC
1470 /* It is always PCI and can handle 64byte bursts. */
1471 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1472 #else
1473 if ((hp->happy_bursts & DMA_BURST64) &&
1474 ((hp->happy_flags & HFLAG_PCI) != 0
1475 #ifdef CONFIG_SBUS
1476 || sbus_can_burst64()
1477 #endif
1478 || 0)) {
1479 u32 gcfg = GREG_CFG_BURST64;
1480
1481 /* I have no idea if I should set the extended
1482 * transfer mode bit for Cheerio, so for now I
1483 * do not. -DaveM
1484 */
1485 #ifdef CONFIG_SBUS
1486 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1487 struct platform_device *op = hp->happy_dev;
1488 if (sbus_can_dma_64bit()) {
1489 sbus_set_sbus64(&op->dev,
1490 hp->happy_bursts);
1491 gcfg |= GREG_CFG_64BIT;
1492 }
1493 }
1494 #endif
1495
1496 bursts = "64";
1497 hme_write32(hp, gregs + GREG_CFG, gcfg);
1498 } else if (hp->happy_bursts & DMA_BURST32) {
1499 bursts = "32";
1500 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1501 } else if (hp->happy_bursts & DMA_BURST16) {
1502 bursts = "16";
1503 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1504 } else {
1505 bursts = "XXX";
1506 hme_write32(hp, gregs + GREG_CFG, 0);
1507 }
1508 #endif /* CONFIG_SPARC */
1509
1510 HMD("old[%08x] bursts<%s>\n",
1511 hme_read32(hp, gregs + GREG_CFG), bursts);
1512
1513 /* Turn off interrupts we do not want to hear. */
1514 hme_write32(hp, gregs + GREG_IMASK,
1515 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1516 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1517
1518 /* Set the transmit ring buffer size. */
1519 HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
1520 hme_read32(hp, etxregs + ETX_RSIZE));
1521 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1522
1523 /* Enable transmitter DVMA. */
1524 HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
1525 hme_write32(hp, etxregs + ETX_CFG,
1526 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1527
1528 /* This chip really rots: for the receiver, sometimes when you
1529 * write to its control registers not all the bits get there
1530 * properly. I cannot think of a sane way to provide complete
1531 * coverage for this hardware bug yet.
1532 */
1533 HMD("erx regs bug old[%08x]\n",
1534 hme_read32(hp, erxregs + ERX_CFG));
1535 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1536 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1537 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1538 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1539 netdev_err(hp->dev,
1540 "Eieee, rx config register gets greasy fries.\n");
1541 netdev_err(hp->dev,
1542 "Trying to set %08x, reread gives %08x\n",
1543 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1544 /* XXX Should return failure here... */
1545 }
1546
1547 /* Enable Big Mac hash table filter. */
1548 HMD("enable hash rx_cfg_old[%08x]\n",
1549 hme_read32(hp, bregs + BMAC_RXCFG));
1550 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1551 if (hp->dev->flags & IFF_PROMISC)
1552 rxcfg |= BIGMAC_RXCFG_PMISC;
1553 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1554
1555 /* Let the bits settle in the chip. */
1556 udelay(10);
1557
1558 /* Ok, configure the Big Mac transmitter. */
1559 HMD("BIGMAC init\n");
1560 regtmp = 0;
1561 if (hp->happy_flags & HFLAG_FULL)
1562 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1563
1564 /* Don't turn on the "don't give up" bit for now. It could cause hme
1565 * to deadlock with the PHY if a Jabber occurs.
1566 */
1567 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1568
1569 /* Give up after 16 TX attempts. */
1570 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1571
1572 /* Enable the output drivers no matter what. */
1573 regtmp = BIGMAC_XCFG_ODENABLE;
1574
1575 /* If card can do lance mode, enable it. */
1576 if (hp->happy_flags & HFLAG_LANCE)
1577 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1578
1579 /* Disable the MII buffers if using external transceiver. */
1580 if (hp->tcvr_type == external)
1581 regtmp |= BIGMAC_XCFG_MIIDISAB;
1582
1583 HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
1584 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1585
1586 /* Start things up. */
1587 HMD("tx old[%08x] and rx [%08x] ON!\n",
1588 hme_read32(hp, bregs + BMAC_TXCFG),
1589 hme_read32(hp, bregs + BMAC_RXCFG));
1590
1591 /* Set larger TX/RX size to allow for 802.1q */
1592 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1593 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1594
1595 hme_write32(hp, bregs + BMAC_TXCFG,
1596 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1597 hme_write32(hp, bregs + BMAC_RXCFG,
1598 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1599
1600 /* Get the autonegotiation started, and the watch timer ticking. */
1601 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1602
1603 /* Success. */
1604 return 0;
1605 }
1606
1607 /* hp->happy_lock must be held */
1608 static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1609 {
1610 void __iomem *tregs = hp->tcvregs;
1611 void __iomem *bregs = hp->bigmacregs;
1612 void __iomem *gregs = hp->gregs;
1613
1614 happy_meal_stop(hp, gregs);
1615 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1616 if (hp->happy_flags & HFLAG_FENABLE)
1617 hme_write32(hp, tregs + TCVR_CFG,
1618 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1619 else
1620 hme_write32(hp, tregs + TCVR_CFG,
1621 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1622 happy_meal_transceiver_check(hp, tregs);
1623 switch(hp->tcvr_type) {
1624 case none:
1625 return;
1626 case internal:
1627 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1628 break;
1629 case external:
1630 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1631 break;
1632 }
1633 if (happy_meal_tcvr_reset(hp, tregs))
1634 return;
1635
1636 /* Latch PHY registers as of now. */
1637 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1638 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1639
1640 /* Advertise everything we can support. */
1641 if (hp->sw_bmsr & BMSR_10HALF)
1642 hp->sw_advertise |= (ADVERTISE_10HALF);
1643 else
1644 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1645
1646 if (hp->sw_bmsr & BMSR_10FULL)
1647 hp->sw_advertise |= (ADVERTISE_10FULL);
1648 else
1649 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1650 if (hp->sw_bmsr & BMSR_100HALF)
1651 hp->sw_advertise |= (ADVERTISE_100HALF);
1652 else
1653 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1654 if (hp->sw_bmsr & BMSR_100FULL)
1655 hp->sw_advertise |= (ADVERTISE_100FULL);
1656 else
1657 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1658
1659 /* Update the PHY advertisement register. */
1660 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1661 }
1662
1663 /* Once status is latched (by happy_meal_interrupt) it is cleared by
1664 * the hardware, so we cannot re-read it and get a correct value.
1665 *
1666 * hp->happy_lock must be held
1667 */
1668 static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1669 {
1670 int reset = 0;
1671
1672 /* Only print messages for non-counter related interrupts. */
1673 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1674 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1675 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1676 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1677 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1678 GREG_STAT_SLVPERR))
1679 netdev_err(hp->dev,
1680 "Error interrupt for happy meal, status = %08x\n",
1681 status);
1682
1683 if (status & GREG_STAT_RFIFOVF) {
1684 /* Receive FIFO overflow is harmless and the hardware will take
1685 care of it, just some packets are lost. Who cares. */
1686 netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
1687 }
1688
1689 if (status & GREG_STAT_STSTERR) {
1690 /* BigMAC SQE link test failed. */
1691 netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
1692 reset = 1;
1693 }
1694
1695 if (status & GREG_STAT_TFIFO_UND) {
1696 /* Transmit FIFO underrun, again DMA error likely. */
1697 netdev_err(hp->dev,
1698 "Happy Meal transmitter FIFO underrun, DMA error.\n");
1699 reset = 1;
1700 }
1701
1702 if (status & GREG_STAT_MAXPKTERR) {
1703 		/* Driver error: we tried to transmit something larger
1704 		 * than the Ethernet maximum MTU.
1705 */
1706 netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
1707 reset = 1;
1708 }
1709
1710 if (status & GREG_STAT_NORXD) {
1711 		/* This is harmless; it just means the system is
1712 * quite loaded and the incoming packet rate was
1713 * faster than the interrupt handler could keep up
1714 * with.
1715 */
1716 netdev_info(hp->dev,
1717 "Happy Meal out of receive descriptors, packet dropped.\n");
1718 }
1719
1720 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1721 /* All sorts of DMA receive errors. */
1722 netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
1723 status & GREG_STAT_RXERR ? "GenericError " : "",
1724 status & GREG_STAT_RXPERR ? "ParityError " : "",
1725 status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
1726 reset = 1;
1727 }
1728
1729 if (status & GREG_STAT_EOPERR) {
1730 /* Driver bug, didn't set EOP bit in tx descriptor given
1731 * to the happy meal.
1732 */
1733 netdev_err(hp->dev,
1734 "EOP not set in happy meal transmit descriptor!\n");
1735 reset = 1;
1736 }
1737
1738 if (status & GREG_STAT_MIFIRQ) {
1739 /* MIF signalled an interrupt, were we polling it? */
1740 netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
1741 }
1742
1743 if (status &
1744 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1745 /* All sorts of transmit DMA errors. */
1746 netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
1747 status & GREG_STAT_TXEACK ? "GenericError " : "",
1748 status & GREG_STAT_TXLERR ? "LateError " : "",
1749 status & GREG_STAT_TXPERR ? "ParityError " : "",
1750 status & GREG_STAT_TXTERR ? "TagBotch " : "");
1751 reset = 1;
1752 }
1753
1754 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1755 /* Bus or parity error when cpu accessed happy meal registers
1756 	 * or its internal FIFOs. We should never see this.
1757 */
1758 netdev_err(hp->dev,
1759 "Happy Meal register access SBUS slave (%s) error.\n",
1760 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1761 reset = 1;
1762 }
1763
1764 if (reset) {
1765 netdev_notice(hp->dev, "Resetting...\n");
1766 happy_meal_init(hp);
1767 return 1;
1768 }
1769 return 0;
1770 }
1771
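/* The MIF noticed a link status change: re-read BMCR and the link partner
 * ability register, force the fastest speed/duplex the partner advertises,
 * and then stop MIF polling.
 */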
1772 /* hp->happy_lock must be held */
1773 static void happy_meal_mif_interrupt(struct happy_meal *hp)
1774 {
1775 void __iomem *tregs = hp->tcvregs;
1776
1777 netdev_info(hp->dev, "Link status change.\n");
1778 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1779 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1780
1781 /* Use the fastest transmission protocol possible. */
1782 if (hp->sw_lpa & LPA_100FULL) {
1783 netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
1784 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
1785 } else if (hp->sw_lpa & LPA_100HALF) {
1786 		netdev_info(hp->dev, "Switching to 100Mbps at half duplex.\n");
1787 hp->sw_bmcr |= BMCR_SPEED100;
1788 } else if (hp->sw_lpa & LPA_10FULL) {
1789 		netdev_info(hp->dev, "Switching to 10Mbps at full duplex.\n");
1790 hp->sw_bmcr |= BMCR_FULLDPLX;
1791 } else {
1792 netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
1793 }
1794 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1795
1796 /* Finally stop polling and shut up the MIF. */
1797 happy_meal_poll_stop(hp, tregs);
1798 }
1799
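/* Reclaim transmit descriptors the chip has completed: walk the ring from
 * tx_old towards tx_new, stop at the first descriptor chain still owned by
 * the hardware (TXFLAG_OWN), unmap the head and fragment buffers, free the
 * skbs, and wake the queue once enough slots are free again.
 */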
1800 /* hp->happy_lock must be held */
1801 static void happy_meal_tx(struct happy_meal *hp)
1802 {
1803 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1804 struct happy_meal_txd *this;
1805 struct net_device *dev = hp->dev;
1806 int elem;
1807
1808 elem = hp->tx_old;
1809 while (elem != hp->tx_new) {
1810 struct sk_buff *skb;
1811 u32 flags, dma_addr, dma_len;
1812 int frag;
1813
1814 netdev_vdbg(hp->dev, "TX[%d]\n", elem);
1815 this = &txbase[elem];
1816 flags = hme_read_desc32(hp, &this->tx_flags);
1817 if (flags & TXFLAG_OWN)
1818 break;
1819 skb = hp->tx_skbs[elem];
1820 if (skb_shinfo(skb)->nr_frags) {
1821 int last;
1822
1823 last = elem + skb_shinfo(skb)->nr_frags;
1824 last &= (TX_RING_SIZE - 1);
1825 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1826 if (flags & TXFLAG_OWN)
1827 break;
1828 }
1829 hp->tx_skbs[elem] = NULL;
1830 dev->stats.tx_bytes += skb->len;
1831
1832 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1833 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1834 dma_len = hme_read_desc32(hp, &this->tx_flags);
1835
1836 dma_len &= TXFLAG_SIZE;
1837 if (!frag)
1838 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1839 else
1840 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1841
1842 elem = NEXT_TX(elem);
1843 this = &txbase[elem];
1844 }
1845
1846 dev_consume_skb_irq(skb);
1847 dev->stats.tx_packets++;
1848 }
1849 hp->tx_old = elem;
1850
1851 if (netif_queue_stopped(dev) &&
1852 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1853 netif_wake_queue(dev);
1854 }
1855
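/* Receive path: frames longer than RX_COPY_THRESHOLD are passed up in the
 * original ring buffer and a freshly allocated skb is mapped in its place;
 * shorter frames are copied into a small skb so the big buffer can be
 * recycled in place.  The hardware checksum is handed up as
 * CHECKSUM_COMPLETE.
 */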
1856 /* Originally I used to handle an allocation failure by giving back just
1857  * that one ring buffer to the happy meal. The problem is that usually when that
1858 * condition is triggered, the happy meal expects you to do something reasonable
1859 * with all of the packets it has DMA'd in. So now I just drop the entire
1860 * ring when we cannot get a new skb and give them all back to the happy meal,
1861 * maybe things will be "happier" now.
1862 *
1863 * hp->happy_lock must be held
1864 */
1865 static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1866 {
1867 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1868 struct happy_meal_rxd *this;
1869 int elem = hp->rx_new, drops = 0;
1870 u32 flags;
1871
1872 this = &rxbase[elem];
1873 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
1874 struct sk_buff *skb;
1875 int len = flags >> 16;
1876 u16 csum = flags & RXFLAG_CSUM;
1877 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
1878
1879 /* Check for errors. */
1880 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
1881 netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
1882 dev->stats.rx_errors++;
1883 if (len < ETH_ZLEN)
1884 dev->stats.rx_length_errors++;
1885 if (len & (RXFLAG_OVERFLOW >> 16)) {
1886 dev->stats.rx_over_errors++;
1887 dev->stats.rx_fifo_errors++;
1888 }
1889
1890 /* Return it to the Happy meal. */
1891 drop_it:
1892 dev->stats.rx_dropped++;
1893 hme_write_rxd(hp, this,
1894 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1895 dma_addr);
1896 goto next;
1897 }
1898 skb = hp->rx_skbs[elem];
1899 if (len > RX_COPY_THRESHOLD) {
1900 struct sk_buff *new_skb;
1901 u32 mapping;
1902
1903 /* Now refill the entry, if we can. */
1904 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1905 if (new_skb == NULL) {
1906 drops++;
1907 goto drop_it;
1908 }
1909 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1910 mapping = dma_map_single(hp->dma_dev, new_skb->data,
1911 RX_BUF_ALLOC_SIZE,
1912 DMA_FROM_DEVICE);
1913 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
1914 dev_kfree_skb_any(new_skb);
1915 drops++;
1916 goto drop_it;
1917 }
1918
1919 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1920 hp->rx_skbs[elem] = new_skb;
1921 hme_write_rxd(hp, this,
1922 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1923 mapping);
1924 skb_reserve(new_skb, RX_OFFSET);
1925
1926 /* Trim the original skb for the netif. */
1927 skb_trim(skb, len);
1928 } else {
1929 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
1930
1931 if (copy_skb == NULL) {
1932 drops++;
1933 goto drop_it;
1934 }
1935
1936 skb_reserve(copy_skb, 2);
1937 skb_put(copy_skb, len);
1938 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1939 skb_copy_from_linear_data(skb, copy_skb->data, len);
1940 dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1941 /* Reuse original ring buffer. */
1942 hme_write_rxd(hp, this,
1943 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1944 dma_addr);
1945
1946 skb = copy_skb;
1947 }
1948
1949 		/* The chip computes a checksum over the whole packet for us;
1950 		 * pass it up as CHECKSUM_COMPLETE. */
1950 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
1951 skb->ip_summed = CHECKSUM_COMPLETE;
1952
1953 netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
1954 skb->protocol = eth_type_trans(skb, dev);
1955 netif_rx(skb);
1956
1957 dev->stats.rx_packets++;
1958 dev->stats.rx_bytes += len;
1959 next:
1960 elem = NEXT_RX(elem);
1961 this = &rxbase[elem];
1962 }
1963 hp->rx_new = elem;
1964 if (drops)
1965 netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
1966 }
1967
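/* Top-level interrupt handler.  GREG_STAT is cleared by the read, so it is
 * sampled exactly once and everything is dispatched from that snapshot:
 * errors first (which may reset the chip), then MIF, TX reclaim and RX
 * processing, all under happy_lock.
 */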
1968 static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
1969 {
1970 struct net_device *dev = dev_id;
1971 struct happy_meal *hp = netdev_priv(dev);
1972 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
1973
1974 HMD("status=%08x\n", happy_status);
1975
1976 spin_lock(&hp->happy_lock);
1977
1978 if (happy_status & GREG_STAT_ERRORS) {
1979 if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
1980 goto out;
1981 }
1982
1983 if (happy_status & GREG_STAT_MIFIRQ)
1984 happy_meal_mif_interrupt(hp);
1985
1986 if (happy_status & GREG_STAT_TXALL)
1987 happy_meal_tx(hp);
1988
1989 if (happy_status & GREG_STAT_RXTOHOST)
1990 happy_meal_rx(hp, dev);
1991
1992 HMD("done\n");
1993 out:
1994 spin_unlock(&hp->happy_lock);
1995
1996 return IRQ_HANDLED;
1997 }
1998
1999 #ifdef CONFIG_SBUS
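/* Shared handler for SBUS Quattro cards: all four ports raise the same
 * interrupt, so poll each port's GREG_STAT and service only the ports that
 * actually have work pending.
 */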
2000 static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2001 {
2002 struct quattro *qp = (struct quattro *) cookie;
2003 int i;
2004
2005 for (i = 0; i < 4; i++) {
2006 struct net_device *dev = qp->happy_meals[i];
2007 struct happy_meal *hp = netdev_priv(dev);
2008 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2009
2010 HMD("status=%08x\n", happy_status);
2011
2012 if (!(happy_status & (GREG_STAT_ERRORS |
2013 GREG_STAT_MIFIRQ |
2014 GREG_STAT_TXALL |
2015 GREG_STAT_RXTOHOST)))
2016 continue;
2017
2018 spin_lock(&hp->happy_lock);
2019
2020 if (happy_status & GREG_STAT_ERRORS)
2021 if (happy_meal_is_not_so_happy(hp, happy_status))
2022 goto next;
2023
2024 if (happy_status & GREG_STAT_MIFIRQ)
2025 happy_meal_mif_interrupt(hp);
2026
2027 if (happy_status & GREG_STAT_TXALL)
2028 happy_meal_tx(hp);
2029
2030 if (happy_status & GREG_STAT_RXTOHOST)
2031 happy_meal_rx(hp, dev);
2032
2033 next:
2034 spin_unlock(&hp->happy_lock);
2035 }
2036 HMD("done\n");
2037
2038 return IRQ_HANDLED;
2039 }
2040 #endif
2041
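/* ndo_open: grab the per-port interrupt (except on SBUS Quattro cards,
 * whose shared IRQ was registered at probe time) and bring the hardware up
 * under happy_lock.
 */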
2042 static int happy_meal_open(struct net_device *dev)
2043 {
2044 struct happy_meal *hp = netdev_priv(dev);
2045 int res;
2046
2047 /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
2048 	 * into a single source, for which we register a handler at probe time.
2049 */
2050 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2051 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2052 dev->name, dev);
2053 if (res) {
2054 HMD("EAGAIN\n");
2055 netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
2056
2057 return -EAGAIN;
2058 }
2059 }
2060
2061 HMD("to happy_meal_init\n");
2062
2063 spin_lock_irq(&hp->happy_lock);
2064 res = happy_meal_init(hp);
2065 spin_unlock_irq(&hp->happy_lock);
2066
2067 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2068 free_irq(hp->irq, dev);
2069 return res;
2070 }
2071
2072 static int happy_meal_close(struct net_device *dev)
2073 {
2074 struct happy_meal *hp = netdev_priv(dev);
2075
2076 spin_lock_irq(&hp->happy_lock);
2077 happy_meal_stop(hp, hp->gregs);
2078 happy_meal_clean_rings(hp);
2079
2080 /* If auto-negotiation timer is running, kill it. */
2081 del_timer(&hp->happy_timer);
2082
2083 spin_unlock_irq(&hp->happy_lock);
2084
2085 /* On Quattro QFE cards, all hme interrupts are concentrated
2086 	 * into a single source, for which we register a handler at probe
2087 	 * time and never unregister it.
2088 */
2089 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2090 free_irq(hp->irq, dev);
2091
2092 return 0;
2093 }
2094
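/* Watchdog timeout: dump the diagnostic state, fully reinitialize the chip
 * and wake the transmit queue again.
 */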
2095 static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
2096 {
2097 struct happy_meal *hp = netdev_priv(dev);
2098
2099 netdev_err(dev, "transmit timed out, resetting\n");
2100 tx_dump_log();
2101 netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
2102 hme_read32(hp, hp->gregs + GREG_STAT),
2103 hme_read32(hp, hp->etxregs + ETX_CFG),
2104 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2105
2106 spin_lock_irq(&hp->happy_lock);
2107 happy_meal_init(hp);
2108 spin_unlock_irq(&hp->happy_lock);
2109
2110 netif_wake_queue(dev);
2111 }
2112
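/* Error unwind for happy_meal_start_xmit(): undo the DMA mappings that were
 * already installed for a partially built descriptor chain.
 */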
2113 static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2114 u32 first_len, u32 first_entry, u32 entry)
2115 {
2116 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2117
2118 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2119
2120 first_entry = NEXT_TX(first_entry);
2121 while (first_entry != entry) {
2122 struct happy_meal_txd *this = &txbase[first_entry];
2123 u32 addr, len;
2124
2125 addr = hme_read_desc32(hp, &this->tx_addr);
2126 len = hme_read_desc32(hp, &this->tx_flags);
2127 len &= TXFLAG_SIZE;
2128 		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);

		/* Advance to the next descriptor, or this unwind loop
		 * would never terminate.
		 */
		first_entry = NEXT_TX(first_entry);
2129 	}
2130 }
2131
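/* Queue a frame for transmission.  Linear skbs use a single SOP+EOP
 * descriptor; fragmented skbs get one descriptor per fragment, and the
 * first (SOP) descriptor is handed to the hardware last so the chip never
 * sees a half-built chain.  For CHECKSUM_PARTIAL skbs the checksum start
 * and insertion offsets are folded into the tx_flags word.
 */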
2132 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2133 struct net_device *dev)
2134 {
2135 struct happy_meal *hp = netdev_priv(dev);
2136 int entry;
2137 u32 tx_flags;
2138
2139 tx_flags = TXFLAG_OWN;
2140 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2141 const u32 csum_start_off = skb_checksum_start_offset(skb);
2142 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2143
2144 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2145 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2146 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2147 }
2148
2149 spin_lock_irq(&hp->happy_lock);
2150
2151 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2152 netif_stop_queue(dev);
2153 spin_unlock_irq(&hp->happy_lock);
2154 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2155 return NETDEV_TX_BUSY;
2156 }
2157
2158 entry = hp->tx_new;
2159 netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
2160 hp->tx_skbs[entry] = skb;
2161
2162 if (skb_shinfo(skb)->nr_frags == 0) {
2163 u32 mapping, len;
2164
2165 len = skb->len;
2166 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2167 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2168 goto out_dma_error;
2169 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2170 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2171 (tx_flags | (len & TXFLAG_SIZE)),
2172 mapping);
2173 entry = NEXT_TX(entry);
2174 } else {
2175 u32 first_len, first_mapping;
2176 int frag, first_entry = entry;
2177
2178 /* We must give this initial chunk to the device last.
2179 * Otherwise we could race with the device.
2180 */
2181 first_len = skb_headlen(skb);
2182 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2183 DMA_TO_DEVICE);
2184 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2185 goto out_dma_error;
2186 entry = NEXT_TX(entry);
2187
2188 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2189 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2190 u32 len, mapping, this_txflags;
2191
2192 len = skb_frag_size(this_frag);
2193 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2194 0, len, DMA_TO_DEVICE);
2195 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2196 unmap_partial_tx_skb(hp, first_mapping, first_len,
2197 first_entry, entry);
2198 goto out_dma_error;
2199 }
2200 this_txflags = tx_flags;
2201 if (frag == skb_shinfo(skb)->nr_frags - 1)
2202 this_txflags |= TXFLAG_EOP;
2203 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2204 (this_txflags | (len & TXFLAG_SIZE)),
2205 mapping);
2206 entry = NEXT_TX(entry);
2207 }
2208 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2209 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2210 first_mapping);
2211 }
2212
2213 hp->tx_new = entry;
2214
2215 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2216 netif_stop_queue(dev);
2217
2218 /* Get it going. */
2219 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2220
2221 spin_unlock_irq(&hp->happy_lock);
2222
2223 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2224 return NETDEV_TX_OK;
2225
2226 out_dma_error:
2227 hp->tx_skbs[hp->tx_new] = NULL;
2228 spin_unlock_irq(&hp->happy_lock);
2229
2230 dev_kfree_skb_any(skb);
2231 dev->stats.tx_dropped++;
2232 return NETDEV_TX_OK;
2233 }
2234
2235 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2236 {
2237 struct happy_meal *hp = netdev_priv(dev);
2238
2239 spin_lock_irq(&hp->happy_lock);
2240 happy_meal_get_counters(hp, hp->bigmacregs);
2241 spin_unlock_irq(&hp->happy_lock);
2242
2243 return &dev->stats;
2244 }
2245
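/* Program the BigMAC receive filter: IFF_ALLMULTI or more than 64 multicast
 * groups opens the 64-bit hash table completely, promiscuous mode sets the
 * RX config bit instead, and otherwise each multicast address selects one
 * hash bit via the top six bits of its little-endian CRC-32.
 */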
2246 static void happy_meal_set_multicast(struct net_device *dev)
2247 {
2248 struct happy_meal *hp = netdev_priv(dev);
2249 void __iomem *bregs = hp->bigmacregs;
2250 struct netdev_hw_addr *ha;
2251 u32 crc;
2252
2253 spin_lock_irq(&hp->happy_lock);
2254
2255 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2256 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2257 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2258 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2259 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2260 } else if (dev->flags & IFF_PROMISC) {
2261 hme_write32(hp, bregs + BMAC_RXCFG,
2262 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2263 } else {
2264 u16 hash_table[4];
2265
2266 memset(hash_table, 0, sizeof(hash_table));
2267 netdev_for_each_mc_addr(ha, dev) {
2268 crc = ether_crc_le(6, ha->addr);
2269 crc >>= 26;
2270 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2271 }
2272 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2273 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2274 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2275 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2276 }
2277
2278 spin_unlock_irq(&hp->happy_lock);
2279 }
2280
2281 /* Ethtool support... */
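/* Report the current link settings.  BMCR and the link partner ability
 * register are sampled under the lock; with autonegotiation enabled the
 * speed and duplex are derived from what the partner advertises, otherwise
 * from the forced bits in BMCR.
 */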
2282 static int hme_get_link_ksettings(struct net_device *dev,
2283 struct ethtool_link_ksettings *cmd)
2284 {
2285 struct happy_meal *hp = netdev_priv(dev);
2286 u32 speed;
2287 u32 supported;
2288
2289 supported =
2290 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2291 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2292 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2293
2294 /* XXX hardcoded stuff for now */
2295 cmd->base.port = PORT_TP; /* XXX no MII support */
2296 cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2297
2298 /* Record PHY settings. */
2299 spin_lock_irq(&hp->happy_lock);
2300 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2301 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2302 spin_unlock_irq(&hp->happy_lock);
2303
2304 if (hp->sw_bmcr & BMCR_ANENABLE) {
2305 cmd->base.autoneg = AUTONEG_ENABLE;
2306 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2307 SPEED_100 : SPEED_10);
2308 if (speed == SPEED_100)
2309 cmd->base.duplex =
2310 (hp->sw_lpa & (LPA_100FULL)) ?
2311 DUPLEX_FULL : DUPLEX_HALF;
2312 else
2313 cmd->base.duplex =
2314 (hp->sw_lpa & (LPA_10FULL)) ?
2315 DUPLEX_FULL : DUPLEX_HALF;
2316 } else {
2317 cmd->base.autoneg = AUTONEG_DISABLE;
2318 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2319 cmd->base.duplex =
2320 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2321 DUPLEX_FULL : DUPLEX_HALF;
2322 }
2323 cmd->base.speed = speed;
2324 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2325 supported);
2326
2327 return 0;
2328 }
2329
2330 static int hme_set_link_ksettings(struct net_device *dev,
2331 const struct ethtool_link_ksettings *cmd)
2332 {
2333 struct happy_meal *hp = netdev_priv(dev);
2334
2335 /* Verify the settings we care about. */
2336 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2337 cmd->base.autoneg != AUTONEG_DISABLE)
2338 return -EINVAL;
2339 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2340 ((cmd->base.speed != SPEED_100 &&
2341 cmd->base.speed != SPEED_10) ||
2342 (cmd->base.duplex != DUPLEX_HALF &&
2343 cmd->base.duplex != DUPLEX_FULL)))
2344 return -EINVAL;
2345
2346 /* Ok, do it to it. */
2347 spin_lock_irq(&hp->happy_lock);
2348 del_timer(&hp->happy_timer);
2349 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2350 spin_unlock_irq(&hp->happy_lock);
2351
2352 return 0;
2353 }
2354
2355 static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2356 {
2357 struct happy_meal *hp = netdev_priv(dev);
2358
2359 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2360 if (hp->happy_flags & HFLAG_PCI) {
2361 struct pci_dev *pdev = hp->happy_dev;
2362 strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2363 }
2364 #ifdef CONFIG_SBUS
2365 else {
2366 const struct linux_prom_registers *regs;
2367 struct platform_device *op = hp->happy_dev;
2368 regs = of_get_property(op->dev.of_node, "regs", NULL);
2369 if (regs)
2370 snprintf(info->bus_info, sizeof(info->bus_info),
2371 "SBUS:%d",
2372 regs->which_io);
2373 }
2374 #endif
2375 }
2376
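/* ethtool get_link: latch the PHY status register and report its link-up
 * bit.
 */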
2377 static u32 hme_get_link(struct net_device *dev)
2378 {
2379 struct happy_meal *hp = netdev_priv(dev);
2380
2381 spin_lock_irq(&hp->happy_lock);
	/* The link bit lives in BMSR, not BMCR, so latch the status register. */
2382 	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 return hp->sw_bmsr & BMSR_LSTATUS;
2386 }
2387
2388 static const struct ethtool_ops hme_ethtool_ops = {
2389 .get_drvinfo = hme_get_drvinfo,
2390 .get_link = hme_get_link,
2391 .get_link_ksettings = hme_get_link_ksettings,
2392 .set_link_ksettings = hme_set_link_ksettings,
2393 };
2394
2395 #ifdef CONFIG_SBUS
2396 /* Given a happy meal sbus device, find its quattro parent.
2397  * If none exists, allocate and return a new one.
2398 *
2399 * Return NULL on failure.
2400 */
2401 static struct quattro *quattro_sbus_find(struct platform_device *child)
2402 {
2403 struct device *parent = child->dev.parent;
2404 struct platform_device *op;
2405 struct quattro *qp;
2406
2407 op = to_platform_device(parent);
2408 qp = platform_get_drvdata(op);
2409 if (qp)
2410 return qp;
2411
2412 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2413 if (!qp)
2414 return NULL;
2415
2416 qp->quattro_dev = child;
2417 qp->next = qfe_sbus_list;
2418 qfe_sbus_list = qp;
2419
2420 platform_set_drvdata(op, qp);
2421 return qp;
2422 }
2423
2424 /* After all quattro cards have been probed, we call these functions
2425 * to register the IRQ handlers for the cards that have been
2426  * successfully probed, and skip the cards that failed to initialize.
2427 */
2428 static int __init quattro_sbus_register_irqs(void)
2429 {
2430 struct quattro *qp;
2431
2432 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2433 struct platform_device *op = qp->quattro_dev;
2434 int err, qfe_slot, skip = 0;
2435
2436 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2437 if (!qp->happy_meals[qfe_slot])
2438 skip = 1;
2439 }
2440 if (skip)
2441 continue;
2442
2443 err = request_irq(op->archdata.irqs[0],
2444 quattro_sbus_interrupt,
2445 IRQF_SHARED, "Quattro",
2446 qp);
2447 if (err != 0) {
2448 dev_err(&op->dev,
2449 "Quattro HME: IRQ registration error %d.\n",
2450 err);
2451 return err;
2452 }
2453 }
2454
2455 return 0;
2456 }
2457
2458 static void quattro_sbus_free_irqs(void)
2459 {
2460 struct quattro *qp;
2461
2462 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2463 struct platform_device *op = qp->quattro_dev;
2464 int qfe_slot, skip = 0;
2465
2466 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2467 if (!qp->happy_meals[qfe_slot])
2468 skip = 1;
2469 }
2470 if (skip)
2471 continue;
2472
2473 free_irq(op->archdata.irqs[0], qp);
2474 }
2475 }
2476 #endif /* CONFIG_SBUS */
2477
2478 #ifdef CONFIG_PCI
2479 static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2480 {
2481 int i;
2482 struct pci_dev *bdev = pdev->bus->self;
2483 struct quattro *qp;
2484
2485 if (!bdev)
2486 return ERR_PTR(-ENODEV);
2487
2488 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2489 struct pci_dev *qpdev = qp->quattro_dev;
2490
2491 if (qpdev == bdev)
2492 return qp;
2493 }
2494
2495 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2496 if (!qp)
2497 return ERR_PTR(-ENOMEM);
2498
2499 for (i = 0; i < 4; i++)
2500 qp->happy_meals[i] = NULL;
2501
2502 qp->quattro_dev = bdev;
2503 qp->next = qfe_pci_list;
2504 qfe_pci_list = qp;
2505
2506 /* No range tricks necessary on PCI. */
2507 qp->nranges = 0;
2508 return qp;
2509 }
2510 #endif /* CONFIG_PCI */
2511
2512 static const struct net_device_ops hme_netdev_ops = {
2513 .ndo_open = happy_meal_open,
2514 .ndo_stop = happy_meal_close,
2515 .ndo_start_xmit = happy_meal_start_xmit,
2516 .ndo_tx_timeout = happy_meal_tx_timeout,
2517 .ndo_get_stats = happy_meal_get_stats,
2518 .ndo_set_rx_mode = happy_meal_set_multicast,
2519 .ndo_set_mac_address = eth_mac_addr,
2520 .ndo_validate_addr = eth_validate_addr,
2521 };
2522
2523 #ifdef CONFIG_SBUS
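/* SBUS probe: map the five register ranges described by the OF node, pick
 * the MAC address (module parameter, Quattro local-mac-address property or
 * IDPROM), derive the feature flags from the hm-rev property, allocate the
 * descriptor block and register the net device.
 */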
2524 static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2525 {
2526 struct device_node *dp = op->dev.of_node, *sbus_dp;
2527 struct quattro *qp = NULL;
2528 struct happy_meal *hp;
2529 struct net_device *dev;
2530 int i, qfe_slot = -1;
2531 u8 addr[ETH_ALEN];
2532 int err = -ENODEV;
2533
2534 sbus_dp = op->dev.parent->of_node;
2535
2536 	/* We can match PCI devices too; do not accept those here. */
2537 if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2538 return err;
2539
2540 if (is_qfe) {
2541 qp = quattro_sbus_find(op);
2542 if (qp == NULL)
2543 goto err_out;
2544 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2545 if (qp->happy_meals[qfe_slot] == NULL)
2546 break;
2547 if (qfe_slot == 4)
2548 goto err_out;
2549 }
2550
2551 err = -ENOMEM;
2552 dev = alloc_etherdev(sizeof(struct happy_meal));
2553 if (!dev)
2554 goto err_out;
2555 SET_NETDEV_DEV(dev, &op->dev);
2556
2557 	/* If the user did not specify a MAC address explicitly, use
2558 * the Quattro local-mac-address property...
2559 */
2560 for (i = 0; i < 6; i++) {
2561 if (macaddr[i] != 0)
2562 break;
2563 }
2564 if (i < 6) { /* a mac address was given */
2565 for (i = 0; i < 6; i++)
2566 addr[i] = macaddr[i];
2567 eth_hw_addr_set(dev, addr);
2568 macaddr[5]++;
2569 } else {
2570 const unsigned char *addr;
2571 int len;
2572
2573 addr = of_get_property(dp, "local-mac-address", &len);
2574
2575 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2576 eth_hw_addr_set(dev, addr);
2577 else
2578 eth_hw_addr_set(dev, idprom->id_ethaddr);
2579 }
2580
2581 hp = netdev_priv(dev);
2582
2583 hp->happy_dev = op;
2584 hp->dma_dev = &op->dev;
2585
2586 spin_lock_init(&hp->happy_lock);
2587
2588 err = -ENODEV;
2589 if (qp != NULL) {
2590 hp->qfe_parent = qp;
2591 hp->qfe_ent = qfe_slot;
2592 qp->happy_meals[qfe_slot] = dev;
2593 }
2594
2595 hp->gregs = of_ioremap(&op->resource[0], 0,
2596 GREG_REG_SIZE, "HME Global Regs");
2597 if (!hp->gregs) {
2598 dev_err(&op->dev, "Cannot map global registers.\n");
2599 goto err_out_free_netdev;
2600 }
2601
2602 hp->etxregs = of_ioremap(&op->resource[1], 0,
2603 ETX_REG_SIZE, "HME TX Regs");
2604 if (!hp->etxregs) {
2605 dev_err(&op->dev, "Cannot map MAC TX registers.\n");
2606 goto err_out_iounmap;
2607 }
2608
2609 hp->erxregs = of_ioremap(&op->resource[2], 0,
2610 ERX_REG_SIZE, "HME RX Regs");
2611 if (!hp->erxregs) {
2612 dev_err(&op->dev, "Cannot map MAC RX registers.\n");
2613 goto err_out_iounmap;
2614 }
2615
2616 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2617 BMAC_REG_SIZE, "HME BIGMAC Regs");
2618 if (!hp->bigmacregs) {
2619 dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
2620 goto err_out_iounmap;
2621 }
2622
2623 hp->tcvregs = of_ioremap(&op->resource[4], 0,
2624 TCVR_REG_SIZE, "HME Tranceiver Regs");
2625 if (!hp->tcvregs) {
2626 dev_err(&op->dev, "Cannot map TCVR registers.\n");
2627 goto err_out_iounmap;
2628 }
2629
2630 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2631 if (hp->hm_revision == 0xff)
2632 hp->hm_revision = 0xa0;
2633
2634 /* Now enable the feature flags we can. */
2635 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2636 hp->happy_flags = HFLAG_20_21;
2637 else if (hp->hm_revision != 0xa0)
2638 hp->happy_flags = HFLAG_NOT_A0;
2639
2640 if (qp != NULL)
2641 hp->happy_flags |= HFLAG_QUATTRO;
2642
2643 /* Get the supported DVMA burst sizes from our Happy SBUS. */
2644 hp->happy_bursts = of_getintprop_default(sbus_dp,
2645 "burst-sizes", 0x00);
2646
2647 hp->happy_block = dma_alloc_coherent(hp->dma_dev,
2648 PAGE_SIZE,
2649 &hp->hblock_dvma,
2650 GFP_ATOMIC);
2651 err = -ENOMEM;
2652 if (!hp->happy_block)
2653 goto err_out_iounmap;
2654
2655 /* Force check of the link first time we are brought up. */
2656 hp->linkcheck = 0;
2657
2658 /* Force timer state to 'asleep' with count of zero. */
2659 hp->timer_state = asleep;
2660 hp->timer_ticks = 0;
2661
2662 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2663
2664 hp->dev = dev;
2665 dev->netdev_ops = &hme_netdev_ops;
2666 dev->watchdog_timeo = 5*HZ;
2667 dev->ethtool_ops = &hme_ethtool_ops;
2668
2669 /* Happy Meal can do it all... */
2670 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2671 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2672
2673 hp->irq = op->archdata.irqs[0];
2674
2675 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2676 /* Hook up SBUS register/descriptor accessors. */
2677 hp->read_desc32 = sbus_hme_read_desc32;
2678 hp->write_txd = sbus_hme_write_txd;
2679 hp->write_rxd = sbus_hme_write_rxd;
2680 hp->read32 = sbus_hme_read32;
2681 hp->write32 = sbus_hme_write32;
2682 #endif
2683
2684 /* Grrr, Happy Meal comes up by default not advertising
2685 * full duplex 100baseT capabilities, fix this.
2686 */
2687 spin_lock_irq(&hp->happy_lock);
2688 happy_meal_set_initial_advertisement(hp);
2689 spin_unlock_irq(&hp->happy_lock);
2690
2691 err = register_netdev(hp->dev);
2692 if (err) {
2693 dev_err(&op->dev, "Cannot register net device, aborting.\n");
2694 goto err_out_free_coherent;
2695 }
2696
2697 platform_set_drvdata(op, hp);
2698
2699 if (qfe_slot != -1)
2700 netdev_info(dev,
2701 "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
2702 qfe_slot, dev->dev_addr);
2703 else
2704 netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
2705 dev->dev_addr);
2706
2707 return 0;
2708
2709 err_out_free_coherent:
2710 dma_free_coherent(hp->dma_dev,
2711 PAGE_SIZE,
2712 hp->happy_block,
2713 hp->hblock_dvma);
2714
2715 err_out_iounmap:
2716 if (hp->gregs)
2717 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2718 if (hp->etxregs)
2719 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2720 if (hp->erxregs)
2721 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2722 if (hp->bigmacregs)
2723 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2724 if (hp->tcvregs)
2725 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2726
2727 if (qp)
2728 qp->happy_meals[qfe_slot] = NULL;
2729
2730 err_out_free_netdev:
2731 free_netdev(dev);
2732
2733 err_out:
2734 return err;
2735 }
2736 #endif
2737
2738 #ifdef CONFIG_PCI
2739 #ifndef CONFIG_SPARC
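/* Heuristic for spotting a Quattro (QFE) card on non-sparc hosts: its four
 * HME functions sit behind a DEC 21153 PCI-PCI bridge, so count the Sun
 * HappyMeal devices on the bus below such a bridge.
 */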
2740 static int is_quattro_p(struct pci_dev *pdev)
2741 {
2742 struct pci_dev *busdev = pdev->bus->self;
2743 struct pci_dev *this_pdev;
2744 int n_hmes;
2745
2746 if (busdev == NULL ||
2747 busdev->vendor != PCI_VENDOR_ID_DEC ||
2748 busdev->device != PCI_DEVICE_ID_DEC_21153)
2749 return 0;
2750
2751 n_hmes = 0;
2752 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2753 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2754 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2755 n_hmes++;
2756 }
2757
2758 if (n_hmes != 4)
2759 return 0;
2760
2761 return 1;
2762 }
2763
2764 /* Fetch MAC address from vital product data of PCI ROM. */
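/* The scan looks for the byte pattern 0x90 0x00 0x09 'N' 'A' 0x06, which is
 * the signature used here for an "NA" (network address) VPD entry carrying
 * six bytes, and copies out the index'th such address found.
 */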
2765 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2766 {
2767 int this_offset;
2768
2769 for (this_offset = 0x20; this_offset < len; this_offset++) {
2770 void __iomem *p = rom_base + this_offset;
2771
2772 if (readb(p + 0) != 0x90 ||
2773 readb(p + 1) != 0x00 ||
2774 readb(p + 2) != 0x09 ||
2775 readb(p + 3) != 0x4e ||
2776 readb(p + 4) != 0x41 ||
2777 readb(p + 5) != 0x06)
2778 continue;
2779
2780 this_offset += 6;
2781 p += 6;
2782
2783 if (index == 0) {
2784 int i;
2785
2786 for (i = 0; i < 6; i++)
2787 dev_addr[i] = readb(p + i);
2788 return 1;
2789 }
2790 index--;
2791 }
2792 return 0;
2793 }
2794
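/* Non-sparc hosts have no IDPROM, so try to pull the MAC address out of the
 * card's expansion ROM VPD (after checking the 0x55 0xaa ROM signature); if
 * that fails, fall back to the Sun OUI 08:00:20 plus three random bytes.
 */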
2795 static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2796 {
2797 size_t size;
2798 void __iomem *p = pci_map_rom(pdev, &size);
2799
2800 if (p) {
2801 int index = 0;
2802 int found;
2803
2804 if (is_quattro_p(pdev))
2805 index = PCI_SLOT(pdev->devfn);
2806
2807 found = readb(p) == 0x55 &&
2808 readb(p + 1) == 0xaa &&
2809 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2810 pci_unmap_rom(pdev, p);
2811 if (found)
2812 return;
2813 }
2814
2815 /* Sun MAC prefix then 3 random bytes. */
2816 dev_addr[0] = 0x08;
2817 dev_addr[1] = 0x00;
2818 dev_addr[2] = 0x20;
2819 get_random_bytes(&dev_addr[3], 3);
2820 }
2821 #endif /* !(CONFIG_SPARC) */
2822
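/* PCI probe: the whole register file lives in a single BAR, carved up at
 * fixed offsets (0x0000 global, 0x2000 ETX, 0x4000 ERX, 0x6000 BigMAC,
 * 0x7000 transceiver).  Quattro ports are tied to their bridge via
 * quattro_pci_find() before the usual netdev setup.
 */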
2823 static int happy_meal_pci_probe(struct pci_dev *pdev,
2824 const struct pci_device_id *ent)
2825 {
2826 struct quattro *qp = NULL;
2827 #ifdef CONFIG_SPARC
2828 struct device_node *dp;
2829 #endif
2830 struct happy_meal *hp;
2831 struct net_device *dev;
2832 void __iomem *hpreg_base;
2833 struct resource *hpreg_res;
2834 int i, qfe_slot = -1;
2835 char prom_name[64];
2836 u8 addr[ETH_ALEN];
2837 int err = -ENODEV;
2838
2839 /* Now make sure pci_dev cookie is there. */
2840 #ifdef CONFIG_SPARC
2841 dp = pci_device_to_OF_node(pdev);
2842 snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
2843 #else
2844 if (is_quattro_p(pdev))
2845 strcpy(prom_name, "SUNW,qfe");
2846 else
2847 strcpy(prom_name, "SUNW,hme");
2848 #endif
2849
2850 err = pcim_enable_device(pdev);
2851 if (err)
2852 goto err_out;
2853 pci_set_master(pdev);
2854
2855 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
2856 qp = quattro_pci_find(pdev);
2857 if (IS_ERR(qp)) {
2858 err = PTR_ERR(qp);
2859 goto err_out;
2860 }
2861
2862 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2863 if (!qp->happy_meals[qfe_slot])
2864 break;
2865
2866 if (qfe_slot == 4)
2867 goto err_out;
2868 }
2869
2870 dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
2871 if (!dev) {
2872 err = -ENOMEM;
2873 goto err_out;
2874 }
2875 SET_NETDEV_DEV(dev, &pdev->dev);
2876
2877 hp = netdev_priv(dev);
2878
2879 hp->happy_dev = pdev;
2880 hp->dma_dev = &pdev->dev;
2881
2882 spin_lock_init(&hp->happy_lock);
2883
2884 if (qp != NULL) {
2885 hp->qfe_parent = qp;
2886 hp->qfe_ent = qfe_slot;
2887 qp->happy_meals[qfe_slot] = dev;
2888 }
2889
2890 err = -EINVAL;
2891 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
2892 dev_err(&pdev->dev,
2893 "Cannot find proper PCI device base address.\n");
2894 goto err_out_clear_quattro;
2895 }
2896
2897 hpreg_res = devm_request_mem_region(&pdev->dev,
2898 pci_resource_start(pdev, 0),
2899 pci_resource_len(pdev, 0),
2900 DRV_NAME);
2901 if (!hpreg_res) {
2902 err = -EBUSY;
2903 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
2904 goto err_out_clear_quattro;
2905 }
2906
2907 hpreg_base = pcim_iomap(pdev, 0, 0x8000);
2908 if (!hpreg_base) {
2909 err = -ENOMEM;
2910 dev_err(&pdev->dev, "Unable to remap card memory.\n");
2911 goto err_out_clear_quattro;
2912 }
2913
2914 for (i = 0; i < 6; i++) {
2915 if (macaddr[i] != 0)
2916 break;
2917 }
2918 if (i < 6) { /* a mac address was given */
2919 for (i = 0; i < 6; i++)
2920 addr[i] = macaddr[i];
2921 eth_hw_addr_set(dev, addr);
2922 macaddr[5]++;
2923 } else {
2924 #ifdef CONFIG_SPARC
2925 const unsigned char *addr;
2926 int len;
2927
2928 if (qfe_slot != -1 &&
2929 (addr = of_get_property(dp, "local-mac-address", &len))
2930 != NULL &&
2931 len == 6) {
2932 eth_hw_addr_set(dev, addr);
2933 } else {
2934 eth_hw_addr_set(dev, idprom->id_ethaddr);
2935 }
2936 #else
2937 u8 addr[ETH_ALEN];
2938
2939 get_hme_mac_nonsparc(pdev, addr);
2940 eth_hw_addr_set(dev, addr);
2941 #endif
2942 }
2943
2944 /* Layout registers. */
2945 hp->gregs = (hpreg_base + 0x0000UL);
2946 hp->etxregs = (hpreg_base + 0x2000UL);
2947 hp->erxregs = (hpreg_base + 0x4000UL);
2948 hp->bigmacregs = (hpreg_base + 0x6000UL);
2949 hp->tcvregs = (hpreg_base + 0x7000UL);
2950
2951 #ifdef CONFIG_SPARC
2952 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2953 if (hp->hm_revision == 0xff)
2954 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
2955 #else
2956 	/* This revision value is known to work on non-sparc hosts. */
2957 hp->hm_revision = 0x20;
2958 #endif
2959
2960 /* Now enable the feature flags we can. */
2961 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2962 hp->happy_flags = HFLAG_20_21;
2963 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
2964 hp->happy_flags = HFLAG_NOT_A0;
2965
2966 if (qp != NULL)
2967 hp->happy_flags |= HFLAG_QUATTRO;
2968
2969 /* And of course, indicate this is PCI. */
2970 hp->happy_flags |= HFLAG_PCI;
2971
2972 #ifdef CONFIG_SPARC
2973 /* Assume PCI happy meals can handle all burst sizes. */
2974 hp->happy_bursts = DMA_BURSTBITS;
2975 #endif
2976
2977 hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
2978 &hp->hblock_dvma, GFP_KERNEL);
2979 if (!hp->happy_block) {
2980 err = -ENOMEM;
2981 goto err_out_clear_quattro;
2982 }
2983
2984 hp->linkcheck = 0;
2985 hp->timer_state = asleep;
2986 hp->timer_ticks = 0;
2987
2988 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2989
2990 hp->irq = pdev->irq;
2991 hp->dev = dev;
2992 dev->netdev_ops = &hme_netdev_ops;
2993 dev->watchdog_timeo = 5*HZ;
2994 dev->ethtool_ops = &hme_ethtool_ops;
2995
2996 /* Happy Meal can do it all... */
2997 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2998 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2999
3000 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3001 /* Hook up PCI register/descriptor accessors. */
3002 hp->read_desc32 = pci_hme_read_desc32;
3003 hp->write_txd = pci_hme_write_txd;
3004 hp->write_rxd = pci_hme_write_rxd;
3005 hp->read32 = pci_hme_read32;
3006 hp->write32 = pci_hme_write32;
3007 #endif
3008
3009 /* Grrr, Happy Meal comes up by default not advertising
3010 * full duplex 100baseT capabilities, fix this.
3011 */
3012 spin_lock_irq(&hp->happy_lock);
3013 happy_meal_set_initial_advertisement(hp);
3014 spin_unlock_irq(&hp->happy_lock);
3015
3016 err = devm_register_netdev(&pdev->dev, dev);
3017 if (err) {
3018 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3019 goto err_out_clear_quattro;
3020 }
3021
3022 pci_set_drvdata(pdev, hp);
3023
3024 if (!qfe_slot) {
3025 struct pci_dev *qpdev = qp->quattro_dev;
3026
3027 prom_name[0] = 0;
3028 if (!strncmp(dev->name, "eth", 3)) {
3029 int i = simple_strtoul(dev->name + 3, NULL, 10);
3030 sprintf(prom_name, "-%d", i + 3);
3031 }
3032 netdev_info(dev,
3033 "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
3034 prom_name, qpdev->vendor, qpdev->device);
3035 }
3036
3037 if (qfe_slot != -1)
3038 netdev_info(dev,
3039 "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
3040 qfe_slot, dev->dev_addr);
3041 else
3042 netdev_info(dev,
3043 			    "HAPPY MEAL (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
3044 dev->dev_addr);
3045
3046 return 0;
3047
3048 err_out_clear_quattro:
3049 if (qp != NULL)
3050 qp->happy_meals[qfe_slot] = NULL;
3051
3052 err_out:
3053 return err;
3054 }
3055
3056 static const struct pci_device_id happymeal_pci_ids[] = {
3057 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3058 { } /* Terminating entry */
3059 };
3060
3061 MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3062
3063 static struct pci_driver hme_pci_driver = {
3064 .name = "hme",
3065 .id_table = happymeal_pci_ids,
3066 .probe = happy_meal_pci_probe,
3067 };
3068
3069 static int __init happy_meal_pci_init(void)
3070 {
3071 return pci_register_driver(&hme_pci_driver);
3072 }
3073
3074 static void happy_meal_pci_exit(void)
3075 {
3076 pci_unregister_driver(&hme_pci_driver);
3077
3078 while (qfe_pci_list) {
3079 struct quattro *qfe = qfe_pci_list;
3080 struct quattro *next = qfe->next;
3081
3082 kfree(qfe);
3083
3084 qfe_pci_list = next;
3085 }
3086 }
3087
3088 #endif
3089
3090 #ifdef CONFIG_SBUS
3091 static const struct of_device_id hme_sbus_match[];
3092 static int hme_sbus_probe(struct platform_device *op)
3093 {
3094 const struct of_device_id *match;
3095 struct device_node *dp = op->dev.of_node;
3096 const char *model = of_get_property(dp, "model", NULL);
3097 int is_qfe;
3098
3099 match = of_match_device(hme_sbus_match, &op->dev);
3100 if (!match)
3101 return -EINVAL;
3102 is_qfe = (match->data != NULL);
3103
3104 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3105 is_qfe = 1;
3106
3107 return happy_meal_sbus_probe_one(op, is_qfe);
3108 }
3109
3110 static int hme_sbus_remove(struct platform_device *op)
3111 {
3112 struct happy_meal *hp = platform_get_drvdata(op);
3113 struct net_device *net_dev = hp->dev;
3114
3115 unregister_netdev(net_dev);
3116
3117 /* XXX qfe parent interrupt... */
3118
3119 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3120 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3121 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3122 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3123 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3124 dma_free_coherent(hp->dma_dev,
3125 PAGE_SIZE,
3126 hp->happy_block,
3127 hp->hblock_dvma);
3128
3129 free_netdev(net_dev);
3130
3131 return 0;
3132 }
3133
3134 static const struct of_device_id hme_sbus_match[] = {
3135 {
3136 .name = "SUNW,hme",
3137 },
3138 {
3139 .name = "SUNW,qfe",
3140 .data = (void *) 1,
3141 },
3142 {
3143 .name = "qfe",
3144 .data = (void *) 1,
3145 },
3146 {},
3147 };
3148
3149 MODULE_DEVICE_TABLE(of, hme_sbus_match);
3150
3151 static struct platform_driver hme_sbus_driver = {
3152 .driver = {
3153 .name = "hme",
3154 .of_match_table = hme_sbus_match,
3155 },
3156 .probe = hme_sbus_probe,
3157 .remove = hme_sbus_remove,
3158 };
3159
3160 static int __init happy_meal_sbus_init(void)
3161 {
3162 int err;
3163
3164 err = platform_driver_register(&hme_sbus_driver);
3165 if (!err)
3166 err = quattro_sbus_register_irqs();
3167
3168 return err;
3169 }
3170
3171 static void happy_meal_sbus_exit(void)
3172 {
3173 platform_driver_unregister(&hme_sbus_driver);
3174 quattro_sbus_free_irqs();
3175
3176 while (qfe_sbus_list) {
3177 struct quattro *qfe = qfe_sbus_list;
3178 struct quattro *next = qfe->next;
3179
3180 kfree(qfe);
3181
3182 qfe_sbus_list = next;
3183 }
3184 }
3185 #endif
3186
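/* Module init: register the SBUS driver first (when configured), then the
 * PCI driver; if PCI registration fails the SBUS side is torn down again so
 * the module loads all-or-nothing.
 */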
3187 static int __init happy_meal_probe(void)
3188 {
3189 int err = 0;
3190
3191 #ifdef CONFIG_SBUS
3192 err = happy_meal_sbus_init();
3193 #endif
3194 #ifdef CONFIG_PCI
3195 if (!err) {
3196 err = happy_meal_pci_init();
3197 #ifdef CONFIG_SBUS
3198 if (err)
3199 happy_meal_sbus_exit();
3200 #endif
3201 }
3202 #endif
3203
3204 return err;
3205 }
3206
3207
3208 static void __exit happy_meal_exit(void)
3209 {
3210 #ifdef CONFIG_SBUS
3211 happy_meal_sbus_exit();
3212 #endif
3213 #ifdef CONFIG_PCI
3214 happy_meal_pci_exit();
3215 #endif
3216 }
3217
3218 module_init(happy_meal_probe);
3219 module_exit(happy_meal_exit);
3220