1 /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
2 * auto carrier detecting ethernet driver. Also known as the
3 * "Happy Meal Ethernet" found on SunSwift SBUS cards.
4 *
5 * Copyright (C) 1996, 1998, 1999, 2002, 2003,
6 * 2006, 2008 David S. Miller (davem@davemloft.net)
7 *
8 * Changes :
9 * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
10 * - port to non-sparc architectures. Tested only on x86 and
11 * only currently works with QFE PCI cards.
12 * - ability to specify the MAC address at module load time by passing this
13 * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
14 */
15
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/fcntl.h>
20 #include <linux/interrupt.h>
21 #include <linux/ioport.h>
22 #include <linux/in.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/delay.h>
26 #include <linux/init.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/crc32.h>
30 #include <linux/random.h>
31 #include <linux/errno.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/mm.h>
36 #include <linux/bitops.h>
37 #include <linux/dma-mapping.h>
38
39 #include <asm/io.h>
40 #include <asm/dma.h>
41 #include <asm/byteorder.h>
42
43 #ifdef CONFIG_SPARC
44 #include <linux/of.h>
45 #include <linux/of_device.h>
46 #include <asm/idprom.h>
47 #include <asm/openprom.h>
48 #include <asm/oplib.h>
49 #include <asm/prom.h>
50 #include <asm/auxio.h>
51 #endif
52 #include <asm/uaccess.h>
53
54 #include <asm/pgtable.h>
55 #include <asm/irq.h>
56
57 #ifdef CONFIG_PCI
58 #include <linux/pci.h>
59 #endif
60
61 #include "sunhme.h"
62
63 #define DRV_NAME "sunhme"
64 #define DRV_VERSION "3.10"
65 #define DRV_RELDATE "August 26, 2008"
66 #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
67
68 static char version[] =
69 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
70
71 MODULE_VERSION(DRV_VERSION);
72 MODULE_AUTHOR(DRV_AUTHOR);
73 MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
74 MODULE_LICENSE("GPL");
75
76 static int macaddr[6];
77
78 /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
79 module_param_array(macaddr, int, NULL, 0);
80 MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
81
82 #ifdef CONFIG_SBUS
83 static struct quattro *qfe_sbus_list;
84 #endif
85
86 #ifdef CONFIG_PCI
87 static struct quattro *qfe_pci_list;
88 #endif
89
90 #undef HMEDEBUG
91 #undef SXDEBUG
92 #undef RXDEBUG
93 #undef TXDEBUG
94 #undef TXLOGGING
95
96 #ifdef TXLOGGING
97 struct hme_tx_logent {
98 unsigned int tstamp;
99 int tx_new, tx_old;
100 unsigned int action;
101 #define TXLOG_ACTION_IRQ 0x01
102 #define TXLOG_ACTION_TXMIT 0x02
103 #define TXLOG_ACTION_TBUSY 0x04
104 #define TXLOG_ACTION_NBUFS 0x08
105 unsigned int status;
106 };
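/* Note: TX_LOG_LEN must remain a power of two, since the log index
 * below wraps with "& (TX_LOG_LEN - 1)".
 */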
107 #define TX_LOG_LEN 128
108 static struct hme_tx_logent tx_log[TX_LOG_LEN];
109 static int txlog_cur_entry;
110 static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
111 {
112 struct hme_tx_logent *tlp;
113 unsigned long flags;
114
115 local_irq_save(flags);
116 tlp = &tx_log[txlog_cur_entry];
117 tlp->tstamp = (unsigned int)jiffies;
118 tlp->tx_new = hp->tx_new;
119 tlp->tx_old = hp->tx_old;
120 tlp->action = a;
121 tlp->status = s;
122 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
123 local_irq_restore(flags);
124 }
125 static __inline__ void tx_dump_log(void)
126 {
127 int i, this;
128
129 this = txlog_cur_entry;
130 for (i = 0; i < TX_LOG_LEN; i++) {
131 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
132 tx_log[this].tstamp,
133 tx_log[this].tx_new, tx_log[this].tx_old,
134 tx_log[this].action, tx_log[this].status);
135 this = (this + 1) & (TX_LOG_LEN - 1);
136 }
137 }
138 static __inline__ void tx_dump_ring(struct happy_meal *hp)
139 {
140 struct hmeal_init_block *hb = hp->happy_block;
141 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
142 int i;
143
144 for (i = 0; i < TX_RING_SIZE; i+=4) {
145 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
146 i, i + 4,
147 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
148 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
149 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
150 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
151 }
152 }
153 #else
154 #define tx_add_log(hp, a, s) do { } while(0)
155 #define tx_dump_log() do { } while(0)
156 #define tx_dump_ring(hp) do { } while(0)
157 #endif
158
159 #ifdef HMEDEBUG
160 #define HMD(x) printk x
161 #else
162 #define HMD(x)
163 #endif
164
165 /* #define AUTO_SWITCH_DEBUG */
166
167 #ifdef AUTO_SWITCH_DEBUG
168 #define ASD(x) printk x
169 #else
170 #define ASD(x)
171 #endif
172
173 #define DEFAULT_IPG0 16 /* For lance-mode only */
174 #define DEFAULT_IPG1 8 /* For all modes */
175 #define DEFAULT_IPG2 4 /* For all modes */
176 #define DEFAULT_JAMSIZE 4 /* Toe jam */
177
178 /* NOTE: In the descriptor writes one _must_ write the address
179 * member _first_. The card must not be allowed to see
180 * the updated descriptor flags until the address is
181 * correct. I've added a write memory barrier between
182 * the two stores so that I can sleep well at night... -DaveM
183 */
184
185 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
186 static void sbus_hme_write32(void __iomem *reg, u32 val)
187 {
188 sbus_writel(val, reg);
189 }
190
191 static u32 sbus_hme_read32(void __iomem *reg)
192 {
193 return sbus_readl(reg);
194 }
195
196 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
197 {
198 rxd->rx_addr = (__force hme32)addr;
199 dma_wmb();
200 rxd->rx_flags = (__force hme32)flags;
201 }
202
203 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
204 {
205 txd->tx_addr = (__force hme32)addr;
206 dma_wmb();
207 txd->tx_flags = (__force hme32)flags;
208 }
209
210 static u32 sbus_hme_read_desc32(hme32 *p)
211 {
212 return (__force u32)*p;
213 }
214
215 static void pci_hme_write32(void __iomem *reg, u32 val)
216 {
217 writel(val, reg);
218 }
219
220 static u32 pci_hme_read32(void __iomem *reg)
221 {
222 return readl(reg);
223 }
224
225 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
226 {
227 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
228 dma_wmb();
229 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
230 }
231
232 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
233 {
234 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
235 dma_wmb();
236 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
237 }
238
239 static u32 pci_hme_read_desc32(hme32 *p)
240 {
241 return le32_to_cpup((__le32 *)p);
242 }
243
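/* When both SBUS and PCI support are compiled in, every register and
 * descriptor access goes through per-device function pointers
 * (hp->write32, hp->write_rxd, and friends) so a single driver image
 * can service either bus flavor; the single-bus builds further below
 * collapse the same accessors into direct macros instead.
 */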
244 #define hme_write32(__hp, __reg, __val) \
245 ((__hp)->write32((__reg), (__val)))
246 #define hme_read32(__hp, __reg) \
247 ((__hp)->read32(__reg))
248 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
249 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
250 #define hme_write_txd(__hp, __txd, __flags, __addr) \
251 ((__hp)->write_txd((__txd), (__flags), (__addr)))
252 #define hme_read_desc32(__hp, __p) \
253 ((__hp)->read_desc32(__p))
254 #define hme_dma_map(__hp, __ptr, __size, __dir) \
255 ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
256 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
257 ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
258 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
259 ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
260 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
261 ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
262 #else
263 #ifdef CONFIG_SBUS
264 /* SBUS only compilation */
265 #define hme_write32(__hp, __reg, __val) \
266 sbus_writel((__val), (__reg))
267 #define hme_read32(__hp, __reg) \
268 sbus_readl(__reg)
269 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
270 do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
271 dma_wmb(); \
272 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
273 } while(0)
274 #define hme_write_txd(__hp, __txd, __flags, __addr) \
275 do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
276 dma_wmb(); \
277 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
278 } while(0)
279 #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
280 #define hme_dma_map(__hp, __ptr, __size, __dir) \
281 dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
282 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
283 dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
284 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
285 dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
286 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
287 dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
288 #else
289 /* PCI only compilation */
290 #define hme_write32(__hp, __reg, __val) \
291 writel((__val), (__reg))
292 #define hme_read32(__hp, __reg) \
293 readl(__reg)
294 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
295 do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
296 dma_wmb(); \
297 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
298 } while(0)
299 #define hme_write_txd(__hp, __txd, __flags, __addr) \
300 do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
301 dma_wmb(); \
302 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
303 } while(0)
304 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
305 {
306 return le32_to_cpup((__le32 *)p);
307 }
308 #define hme_dma_map(__hp, __ptr, __size, __dir) \
309 pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
310 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
311 pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
312 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
313 pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
314 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
315 pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
316 #endif
317 #endif
318
319
320 /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
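/* The bit-bang helpers below clock a standard IEEE 802.3 clause 22 MDIO
 * management frame out by hand: a preamble of 32 one-bits, a start/opcode
 * sequence (01 10 for read, 01 01 for write), the 5-bit PHY address and
 * 5-bit register address MSB-first, a turnaround, then 16 data bits.
 */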
321 static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
322 {
323 hme_write32(hp, tregs + TCVR_BBDATA, bit);
324 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
325 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
326 }
327
328 #if 0
329 static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
330 {
331 u32 ret;
332
333 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
334 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
335 ret = hme_read32(hp, tregs + TCVR_CFG);
336 if (internal)
337 ret &= TCV_CFG_MDIO0;
338 else
339 ret &= TCV_CFG_MDIO1;
340
341 return ret;
342 }
343 #endif
344
345 static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
346 {
347 u32 retval;
348
349 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
350 udelay(1);
351 retval = hme_read32(hp, tregs + TCVR_CFG);
352 if (internal)
353 retval &= TCV_CFG_MDIO0;
354 else
355 retval &= TCV_CFG_MDIO1;
356 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
357
358 return retval;
359 }
360
361 #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
362
363 static int happy_meal_bb_read(struct happy_meal *hp,
364 void __iomem *tregs, int reg)
365 {
366 u32 tmp;
367 int retval = 0;
368 int i;
369
370 ASD(("happy_meal_bb_read: reg=%d ", reg));
371
372 /* Enable the MIF BitBang outputs. */
373 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
374
375 /* Force BitBang into the idle state. */
376 for (i = 0; i < 32; i++)
377 BB_PUT_BIT(hp, tregs, 1);
378
379 /* Give it the read sequence. */
380 BB_PUT_BIT(hp, tregs, 0);
381 BB_PUT_BIT(hp, tregs, 1);
382 BB_PUT_BIT(hp, tregs, 1);
383 BB_PUT_BIT(hp, tregs, 0);
384
385 /* Give it the PHY address. */
386 tmp = hp->paddr & 0xff;
387 for (i = 4; i >= 0; i--)
388 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
389
390 /* Tell it what register we want to read. */
391 tmp = (reg & 0xff);
392 for (i = 4; i >= 0; i--)
393 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
394
395 /* Close down the MIF BitBang outputs. */
396 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
397
398 /* Now read in the value. */
399 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
400 for (i = 15; i >= 0; i--)
401 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
402 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
403 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
404 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
405 ASD(("value=%x\n", retval));
406 return retval;
407 }
408
409 static void happy_meal_bb_write(struct happy_meal *hp,
410 void __iomem *tregs, int reg,
411 unsigned short value)
412 {
413 u32 tmp;
414 int i;
415
416 ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
417
418 /* Enable the MIF BitBang outputs. */
419 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
420
421 /* Force BitBang into the idle state. */
422 for (i = 0; i < 32; i++)
423 BB_PUT_BIT(hp, tregs, 1);
424
425 /* Give it write sequence. */
426 BB_PUT_BIT(hp, tregs, 0);
427 BB_PUT_BIT(hp, tregs, 1);
428 BB_PUT_BIT(hp, tregs, 0);
429 BB_PUT_BIT(hp, tregs, 1);
430
431 /* Give it the PHY address. */
432 tmp = (hp->paddr & 0xff);
433 for (i = 4; i >= 0; i--)
434 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
435
436 /* Tell it what register we will be writing. */
437 tmp = (reg & 0xff);
438 for (i = 4; i >= 0; i--)
439 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
440
441 /* Tell it to become ready for the bits. */
442 BB_PUT_BIT(hp, tregs, 1);
443 BB_PUT_BIT(hp, tregs, 0);
444
445 for (i = 15; i >= 0; i--)
446 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
447
448 /* Close down the MIF BitBang outputs. */
449 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
450 }
451
452 #define TCVR_READ_TRIES 16
453
454 static int happy_meal_tcvr_read(struct happy_meal *hp,
455 void __iomem *tregs, int reg)
456 {
457 int tries = TCVR_READ_TRIES;
458 int retval;
459
460 ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
461 if (hp->tcvr_type == none) {
462 ASD(("no transceiver, value=TCVR_FAILURE\n"));
463 return TCVR_FAILURE;
464 }
465
466 if (!(hp->happy_flags & HFLAG_FENABLE)) {
467 ASD(("doing bit bang\n"));
468 return happy_meal_bb_read(hp, tregs, reg);
469 }
470
471 hme_write32(hp, tregs + TCVR_FRAME,
472 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
473 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
474 udelay(20);
475 if (!tries) {
476 printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
477 return TCVR_FAILURE;
478 }
479 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
480 ASD(("value=%04x\n", retval));
481 return retval;
482 }
483
484 #define TCVR_WRITE_TRIES 16
485
486 static void happy_meal_tcvr_write(struct happy_meal *hp,
487 void __iomem *tregs, int reg,
488 unsigned short value)
489 {
490 int tries = TCVR_WRITE_TRIES;
491
492 ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
493
494 /* Welcome to Sun Microsystems, can I take your order please? */
495 if (!(hp->happy_flags & HFLAG_FENABLE)) {
496 happy_meal_bb_write(hp, tregs, reg, value);
497 return;
498 }
499
500 /* Would you like fries with that? */
501 hme_write32(hp, tregs + TCVR_FRAME,
502 (FRAME_WRITE | (hp->paddr << 23) |
503 ((reg & 0xff) << 18) | (value & 0xffff)));
504 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
505 udelay(20);
506
507 /* Anything else? */
508 if (!tries)
509 printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
510
511 /* Fifty-two cents is your change, have a nice day. */
512 }
513
514 /* Auto negotiation. The scheme is very simple. We have a timer routine
515 * that keeps watching the auto negotiation process as it progresses.
516 * The DP83840 is first told to start doing its thing, then we set up the timer
517 * and place the timer state machine in its initial state.
518 *
519 * Here the timer peeks at the DP83840 status registers at each click to see
520 * if the auto negotiation has completed, we assume here that the DP83840 PHY
521 * will time out at some point and just tell us what (didn't) happen. For
522 * complete coverage we only allow so many of the ticks at this level to run,
523 * when this has expired we print a warning message and try another strategy.
524 * This "other" strategy is to force the interface into various speed/duplex
525 * configurations and we stop when we see a link-up condition before the
526 * maximum number of "peek" ticks have occurred.
527 *
528 * Once a valid link status has been detected we configure the BigMAC and
529 * the rest of the Happy Meal to speak the most efficient protocol we could
530 * get a clean link for. The priority for link configurations, highest first
531 * is:
532 * 100 Base-T Full Duplex
533 * 100 Base-T Half Duplex
534 * 10 Base-T Full Duplex
535 * 10 Base-T Half Duplex
536 *
537 * We start a new timer now, after a successful auto negotiation status has
538 * been detected. This timer just waits for the link-up bit to get set in
539 * the BMCR of the DP83840. When this occurs we print a kernel log message
540 * describing the link type in use and the fact that it is up.
541 *
542 * If a fatal error of some sort is signalled and detected in the interrupt
543 * service routine, and the chip is reset, or the link is ifconfig'd down
544 * and then back up, this entire process repeats itself all over again.
545 */
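/* A rough summary of the timer states implemented in happy_meal_timer()
 * below: arbwait (autoneg in progress) -> lupwait (autoneg done, waiting
 * for link) -> asleep (link up, timer idle). If arbitration times out we
 * force a speed/duplex setting and enter ltrywait, stepping through
 * try_next_permutation() until a link comes up or the chip is re-inited.
 */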
546 static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
547 {
548 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
549
550 /* Downgrade from full to half duplex. Only possible
551 * via ethtool.
552 */
553 if (hp->sw_bmcr & BMCR_FULLDPLX) {
554 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
555 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
556 return 0;
557 }
558
559 /* Downgrade from 100 to 10. */
560 if (hp->sw_bmcr & BMCR_SPEED100) {
561 hp->sw_bmcr &= ~(BMCR_SPEED100);
562 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
563 return 0;
564 }
565
566 /* We've tried everything. */
567 return -1;
568 }
569
570 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
571 {
572 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
573 if (hp->tcvr_type == external)
574 printk("external ");
575 else
576 printk("internal ");
577 printk("transceiver at ");
578 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
579 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
580 if (hp->sw_lpa & LPA_100FULL)
581 printk("100Mb/s, Full Duplex.\n");
582 else
583 printk("100Mb/s, Half Duplex.\n");
584 } else {
585 if (hp->sw_lpa & LPA_10FULL)
586 printk("10Mb/s, Full Duplex.\n");
587 else
588 printk("10Mb/s, Half Duplex.\n");
589 }
590 }
591
592 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
593 {
594 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
595 if (hp->tcvr_type == external)
596 printk("external ");
597 else
598 printk("internal ");
599 printk("transceiver at ");
600 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
601 if (hp->sw_bmcr & BMCR_SPEED100)
602 printk("100Mb/s, ");
603 else
604 printk("10Mb/s, ");
605 if (hp->sw_bmcr & BMCR_FULLDPLX)
606 printk("Full Duplex.\n");
607 else
608 printk("Half Duplex.\n");
609 }
610
611 static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
612 {
613 int full;
614
615 /* All we care about is making sure the bigmac tx_cfg has a
616 * proper duplex setting.
617 */
618 if (hp->timer_state == arbwait) {
619 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
620 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
621 goto no_response;
622 if (hp->sw_lpa & LPA_100FULL)
623 full = 1;
624 else if (hp->sw_lpa & LPA_100HALF)
625 full = 0;
626 else if (hp->sw_lpa & LPA_10FULL)
627 full = 1;
628 else
629 full = 0;
630 } else {
631 /* Forcing a link mode. */
632 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
633 if (hp->sw_bmcr & BMCR_FULLDPLX)
634 full = 1;
635 else
636 full = 0;
637 }
638
639 /* Before changing other bits in the tx_cfg register, and in
640 * general any of the other TX config registers too, you
641 * must:
642 * 1) Clear Enable
643 * 2) Poll with reads until that bit reads back as zero
644 * 3) Make TX configuration changes
645 * 4) Set Enable once more
646 */
647 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
648 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
649 ~(BIGMAC_TXCFG_ENABLE));
650 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
651 barrier();
652 if (full) {
653 hp->happy_flags |= HFLAG_FULL;
654 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
655 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
656 BIGMAC_TXCFG_FULLDPLX);
657 } else {
658 hp->happy_flags &= ~(HFLAG_FULL);
659 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
660 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
661 ~(BIGMAC_TXCFG_FULLDPLX));
662 }
663 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
664 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
665 BIGMAC_TXCFG_ENABLE);
666 return 0;
667 no_response:
668 return 1;
669 }
670
671 static int happy_meal_init(struct happy_meal *hp);
672
673 static int is_lucent_phy(struct happy_meal *hp)
674 {
675 void __iomem *tregs = hp->tcvregs;
676 unsigned short mr2, mr3;
677 int ret = 0;
678
679 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
680 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
681 if ((mr2 & 0xffff) == 0x0180 &&
682 ((mr3 & 0xffff) >> 10) == 0x1d)
683 ret = 1;
684
685 return ret;
686 }
687
688 static void happy_meal_timer(unsigned long data)
689 {
690 struct happy_meal *hp = (struct happy_meal *) data;
691 void __iomem *tregs = hp->tcvregs;
692 int restart_timer = 0;
693
694 spin_lock_irq(&hp->happy_lock);
695
696 hp->timer_ticks++;
697 switch(hp->timer_state) {
698 case arbwait:
699 /* Only allow for 10 ticks (about 12 seconds); that is much too
700 * long to wait for arbitration to complete.
701 */
702 if (hp->timer_ticks >= 10) {
703 /* Enter force mode. */
704 do_force_mode:
705 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
706 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
707 hp->dev->name);
708 hp->sw_bmcr = BMCR_SPEED100;
709 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
710
711 if (!is_lucent_phy(hp)) {
712 /* OK, it seems we need to disable the transceiver for the first
713 * tick to make sure we get an accurate link state at the
714 * second tick.
715 */
716 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
717 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
718 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
719 }
720 hp->timer_state = ltrywait;
721 hp->timer_ticks = 0;
722 restart_timer = 1;
723 } else {
724 /* Anything interesting happen? */
725 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
726 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
727 int ret;
728
729 /* Just what we've been waiting for... */
730 ret = set_happy_link_modes(hp, tregs);
731 if (ret) {
732 /* Ooops, something bad happened, go to force
733 * mode.
734 *
735 * XXX Broken hubs which don't support 802.3u
736 * XXX auto-negotiation make this happen as well.
737 */
738 goto do_force_mode;
739 }
740
741 /* Success, at least so far, advance our state engine. */
742 hp->timer_state = lupwait;
743 restart_timer = 1;
744 } else {
745 restart_timer = 1;
746 }
747 }
748 break;
749
750 case lupwait:
751 /* Auto negotiation was successful and we are awaiting a
752 * link up status. I have decided to let this timer run
753 * forever until some sort of error is signalled, reporting
754 * a message to the user at 10 second intervals.
755 */
756 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
757 if (hp->sw_bmsr & BMSR_LSTATUS) {
758 /* Wheee, it's up, display the link mode in use and put
759 * the timer to sleep.
760 */
761 display_link_mode(hp, tregs);
762 hp->timer_state = asleep;
763 restart_timer = 0;
764 } else {
765 if (hp->timer_ticks >= 10) {
766 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
767 "not completely up.\n", hp->dev->name);
768 hp->timer_ticks = 0;
769 restart_timer = 1;
770 } else {
771 restart_timer = 1;
772 }
773 }
774 break;
775
776 case ltrywait:
777 /* Making the timeout here too long can make it take
778 * annoyingly long to attempt all of the link mode
779 * permutations, but then again this is essentially
780 * error recovery code for the most part.
781 */
782 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
783 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
784 if (hp->timer_ticks == 1) {
785 if (!is_lucent_phy(hp)) {
786 /* Disable the transceiver for this tick; we'll re-enable it next
787 * tick, then check link state on the following tick.
788 */
789 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
790 happy_meal_tcvr_write(hp, tregs,
791 DP83840_CSCONFIG, hp->sw_csconfig);
792 }
793 restart_timer = 1;
794 break;
795 }
796 if (hp->timer_ticks == 2) {
797 if (!is_lucent_phy(hp)) {
798 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
799 happy_meal_tcvr_write(hp, tregs,
800 DP83840_CSCONFIG, hp->sw_csconfig);
801 }
802 restart_timer = 1;
803 break;
804 }
805 if (hp->sw_bmsr & BMSR_LSTATUS) {
806 /* Force mode selection success. */
807 display_forced_link_mode(hp, tregs);
808 set_happy_link_modes(hp, tregs); /* XXX error? then what? */
809 hp->timer_state = asleep;
810 restart_timer = 0;
811 } else {
812 if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
813 int ret;
814
815 ret = try_next_permutation(hp, tregs);
816 if (ret == -1) {
817 /* Aieee, tried them all, reset the
818 * chip and try all over again.
819 */
820
821 /* Let the user know... */
822 printk(KERN_NOTICE "%s: Link down, cable problem?\n",
823 hp->dev->name);
824
825 ret = happy_meal_init(hp);
826 if (ret) {
827 /* ho hum... */
828 printk(KERN_ERR "%s: Error, cannot re-init the "
829 "Happy Meal.\n", hp->dev->name);
830 }
831 goto out;
832 }
833 if (!is_lucent_phy(hp)) {
834 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
835 DP83840_CSCONFIG);
836 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
837 happy_meal_tcvr_write(hp, tregs,
838 DP83840_CSCONFIG, hp->sw_csconfig);
839 }
840 hp->timer_ticks = 0;
841 restart_timer = 1;
842 } else {
843 restart_timer = 1;
844 }
845 }
846 break;
847
848 case asleep:
849 default:
850 /* Can't happen... */
851 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
852 hp->dev->name);
853 restart_timer = 0;
854 hp->timer_ticks = 0;
855 hp->timer_state = asleep; /* foo on you */
856 break;
857 }
858
859 if (restart_timer) {
860 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
861 add_timer(&hp->happy_timer);
862 }
863
864 out:
865 spin_unlock_irq(&hp->happy_lock);
866 }
867
868 #define TX_RESET_TRIES 32
869 #define RX_RESET_TRIES 32
870
871 /* hp->happy_lock must be held */
872 static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
873 {
874 int tries = TX_RESET_TRIES;
875
876 HMD(("happy_meal_tx_reset: reset, "));
877
878 /* Would you like to try our SMCC Delux? */
879 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
880 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
881 udelay(20);
882
883 /* Lettuce, tomato, buggy hardware (no extra charge)? */
884 if (!tries)
885 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
886
887 /* Take care. */
888 HMD(("done\n"));
889 }
890
891 /* hp->happy_lock must be held */
892 static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
893 {
894 int tries = RX_RESET_TRIES;
895
896 HMD(("happy_meal_rx_reset: reset, "));
897
898 /* We have a special on GNU/Viking hardware bugs today. */
899 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
900 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
901 udelay(20);
902
903 /* Will that be all? */
904 if (!tries)
905 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
906
907 /* Don't forget your vik_1137125_wa. Have a nice day. */
908 HMD(("done\n"));
909 }
910
911 #define STOP_TRIES 16
912
913 /* hp->happy_lock must be held */
914 static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
915 {
916 int tries = STOP_TRIES;
917
918 HMD(("happy_meal_stop: reset, "));
919
920 /* We're consolidating our STB products, it's your lucky day. */
921 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
922 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
923 udelay(20);
924
925 /* Come back next week when we are "Sun Microelectronics". */
926 if (!tries)
927 printk(KERN_ERR "happy meal: Fry guys.");
928
929 /* Remember: "Different name, same old buggy as shit hardware." */
930 HMD(("done\n"));
931 }
932
933 /* hp->happy_lock must be held */
934 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
935 {
936 struct net_device_stats *stats = &hp->net_stats;
937
938 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
939 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
940
941 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
942 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
943
944 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
945 hme_write32(hp, bregs + BMAC_GLECTR, 0);
946
947 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
948
949 stats->collisions +=
950 (hme_read32(hp, bregs + BMAC_EXCTR) +
951 hme_read32(hp, bregs + BMAC_LTCTR));
952 hme_write32(hp, bregs + BMAC_EXCTR, 0);
953 hme_write32(hp, bregs + BMAC_LTCTR, 0);
954 }
955
956 /* hp->happy_lock must be held */
957 static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
958 {
959 ASD(("happy_meal_poll_stop: "));
960
961 /* If polling disabled or not polling already, nothing to do. */
962 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
963 (HFLAG_POLLENABLE | HFLAG_POLL)) {
964 HMD(("not polling, return\n"));
965 return;
966 }
967
968 /* Shut up the MIF. */
969 ASD(("were polling, mif ints off, "));
970 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
971
972 /* Turn off polling. */
973 ASD(("polling off, "));
974 hme_write32(hp, tregs + TCVR_CFG,
975 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
976
977 /* We are no longer polling. */
978 hp->happy_flags &= ~(HFLAG_POLL);
979
980 /* Let the bits settle. */
981 udelay(200);
982 ASD(("done\n"));
983 }
984
985 /* Only Sun can take such nice parts and fuck up the programming interface
986 * like this. Good job guys...
987 */
988 #define TCVR_RESET_TRIES 16 /* It should reset quickly */
989 #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
990
991 /* hp->happy_lock must be held */
992 static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
993 {
994 u32 tconfig;
995 int result, tries = TCVR_RESET_TRIES;
996
997 tconfig = hme_read32(hp, tregs + TCVR_CFG);
998 ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
999 if (hp->tcvr_type == external) {
1000 ASD(("external<"));
1001 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1002 hp->tcvr_type = internal;
1003 hp->paddr = TCV_PADDR_ITX;
1004 ASD(("ISOLATE,"));
1005 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1006 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1007 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1008 if (result == TCVR_FAILURE) {
1009 ASD(("phyread_fail>\n"));
1010 return -1;
1011 }
1012 ASD(("phyread_ok,PSELECT>"));
1013 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1014 hp->tcvr_type = external;
1015 hp->paddr = TCV_PADDR_ETX;
1016 } else {
1017 if (tconfig & TCV_CFG_MDIO1) {
1018 ASD(("internal<PSELECT,"));
1019 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1020 ASD(("ISOLATE,"));
1021 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1022 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1023 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1024 if (result == TCVR_FAILURE) {
1025 ASD(("phyread_fail>\n"));
1026 return -1;
1027 }
1028 ASD(("phyread_ok,~PSELECT>"));
1029 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1030 hp->tcvr_type = internal;
1031 hp->paddr = TCV_PADDR_ITX;
1032 }
1033 }
1034
1035 ASD(("BMCR_RESET "));
1036 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1037
1038 while (--tries) {
1039 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1040 if (result == TCVR_FAILURE)
1041 return -1;
1042 hp->sw_bmcr = result;
1043 if (!(result & BMCR_RESET))
1044 break;
1045 udelay(20);
1046 }
1047 if (!tries) {
1048 ASD(("BMCR RESET FAILED!\n"));
1049 return -1;
1050 }
1051 ASD(("RESET_OK\n"));
1052
1053 /* Get fresh copies of the PHY registers. */
1054 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1055 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1056 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1057 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1058
1059 ASD(("UNISOLATE"));
1060 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1061 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1062
1063 tries = TCVR_UNISOLATE_TRIES;
1064 while (--tries) {
1065 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1066 if (result == TCVR_FAILURE)
1067 return -1;
1068 if (!(result & BMCR_ISOLATE))
1069 break;
1070 udelay(20);
1071 }
1072 if (!tries) {
1073 ASD((" FAILED!\n"));
1074 return -1;
1075 }
1076 ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
1077 if (!is_lucent_phy(hp)) {
1078 result = happy_meal_tcvr_read(hp, tregs,
1079 DP83840_CSCONFIG);
1080 happy_meal_tcvr_write(hp, tregs,
1081 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1082 }
1083 return 0;
1084 }
1085
1086 /* Figure out whether we have an internal or external transceiver.
1087 *
1088 * hp->happy_lock must be held
1089 */
1090 static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1091 {
1092 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1093
1094 ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
1095 if (hp->happy_flags & HFLAG_POLL) {
1096 /* If we are polling, we must stop to get the transceiver type. */
1097 ASD(("<polling> "));
1098 if (hp->tcvr_type == internal) {
1099 if (tconfig & TCV_CFG_MDIO1) {
1100 ASD(("<internal> <poll stop> "));
1101 happy_meal_poll_stop(hp, tregs);
1102 hp->paddr = TCV_PADDR_ETX;
1103 hp->tcvr_type = external;
1104 ASD(("<external>\n"));
1105 tconfig &= ~(TCV_CFG_PENABLE);
1106 tconfig |= TCV_CFG_PSELECT;
1107 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1108 }
1109 } else {
1110 if (hp->tcvr_type == external) {
1111 ASD(("<external> "));
1112 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1113 ASD(("<poll stop> "));
1114 happy_meal_poll_stop(hp, tregs);
1115 hp->paddr = TCV_PADDR_ITX;
1116 hp->tcvr_type = internal;
1117 ASD(("<internal>\n"));
1118 hme_write32(hp, tregs + TCVR_CFG,
1119 hme_read32(hp, tregs + TCVR_CFG) &
1120 ~(TCV_CFG_PSELECT));
1121 }
1122 ASD(("\n"));
1123 } else {
1124 ASD(("<none>\n"));
1125 }
1126 }
1127 } else {
1128 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1129
1130 /* Else we can just work off of the MDIO bits. */
1131 ASD(("<not polling> "));
1132 if (reread & TCV_CFG_MDIO1) {
1133 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1134 hp->paddr = TCV_PADDR_ETX;
1135 hp->tcvr_type = external;
1136 ASD(("<external>\n"));
1137 } else {
1138 if (reread & TCV_CFG_MDIO0) {
1139 hme_write32(hp, tregs + TCVR_CFG,
1140 tconfig & ~(TCV_CFG_PSELECT));
1141 hp->paddr = TCV_PADDR_ITX;
1142 hp->tcvr_type = internal;
1143 ASD(("<internal>\n"));
1144 } else {
1145 printk(KERN_ERR "happy meal: Transceiver and a coke please.");
1146 hp->tcvr_type = none; /* Grrr... */
1147 ASD(("<none>\n"));
1148 }
1149 }
1150 }
1151 }
1152
1153 /* The receive ring buffers are a bit tricky to get right. Here goes...
1154 *
1155 * The buffers we dma into must be 64 byte aligned. So we use a special
1156 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1157 * we really need.
1158 *
1159 * We use skb_reserve() to align the data block we get in the skb. We
1160 * also program the erxregs->cfg register to use an offset of 2. This
1161 * empirical constant plus the ethernet header size will always leave
1162 * us with a nicely aligned ip header once we pass things up to the
1163 * protocol layers.
1164 *
1165 * The numbers work out to:
1166 *
1167 * Max ethernet frame size 1518
1168 * Ethernet header size 14
1169 * Happy Meal base offset 2
1170 *
1171 * Say a skb data area is at 0xf001b010, and its size alloced is
1172 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1173 *
1174 * First our alloc_skb() routine aligns the data base to a 64 byte
1175 * boundary. We now have 0xf001b040 as our skb data address. We
1176 * plug this into the receive descriptor address.
1177 *
1178 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1179 * So now the data we will end up looking at starts at 0xf001b042. When
1180 * the packet arrives, we will check out the size received and subtract
1181 * this from the skb->length. Then we just pass the packet up to the
1182 * protocols as is, and allocate a new skb to replace this slot we have
1183 * just received from.
1184 *
1185 * The ethernet layer will strip the ether header from the front of the
1186 * skb we just sent to it, this leaves us with the ip header sitting
1187 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
1188 * Happy Meal has even checksummed the tcp/udp data for us. The 16
1189 * bit checksum is obtained from the low bits of the receive descriptor
1190 * flags, thus:
1191 *
1192 * skb->csum = rxd->rx_flags & 0xffff;
1193 * skb->ip_summed = CHECKSUM_COMPLETE;
1194 *
1195 * before sending off the skb to the protocols, and we are good as gold.
1196 */
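/* For reference, each receive descriptor is handed back to the chip as
 * RXFLAG_OWN plus the usable buffer size in the upper 16 bits of rx_flags
 * (see happy_meal_init_rings() below); on completion the chip reports the
 * received frame length in those same upper bits and the partial checksum
 * in the low 16 bits, which is what the skb->csum line above refers to.
 */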
1197 static void happy_meal_clean_rings(struct happy_meal *hp)
1198 {
1199 int i;
1200
1201 for (i = 0; i < RX_RING_SIZE; i++) {
1202 if (hp->rx_skbs[i] != NULL) {
1203 struct sk_buff *skb = hp->rx_skbs[i];
1204 struct happy_meal_rxd *rxd;
1205 u32 dma_addr;
1206
1207 rxd = &hp->happy_block->happy_meal_rxd[i];
1208 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1209 dma_unmap_single(hp->dma_dev, dma_addr,
1210 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1211 dev_kfree_skb_any(skb);
1212 hp->rx_skbs[i] = NULL;
1213 }
1214 }
1215
1216 for (i = 0; i < TX_RING_SIZE; i++) {
1217 if (hp->tx_skbs[i] != NULL) {
1218 struct sk_buff *skb = hp->tx_skbs[i];
1219 struct happy_meal_txd *txd;
1220 u32 dma_addr;
1221 int frag;
1222
1223 hp->tx_skbs[i] = NULL;
1224
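/* A fragmented skb occupies nr_frags + 1 consecutive TX descriptors:
 * the first maps the linear data (unmapped with dma_unmap_single), the
 * rest map page fragments (dma_unmap_page); i advances across them below.
 */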
1225 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1226 txd = &hp->happy_block->happy_meal_txd[i];
1227 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1228 if (!frag)
1229 dma_unmap_single(hp->dma_dev, dma_addr,
1230 (hme_read_desc32(hp, &txd->tx_flags)
1231 & TXFLAG_SIZE),
1232 DMA_TO_DEVICE);
1233 else
1234 dma_unmap_page(hp->dma_dev, dma_addr,
1235 (hme_read_desc32(hp, &txd->tx_flags)
1236 & TXFLAG_SIZE),
1237 DMA_TO_DEVICE);
1238
1239 if (frag != skb_shinfo(skb)->nr_frags)
1240 i++;
1241 }
1242
1243 dev_kfree_skb_any(skb);
1244 }
1245 }
1246 }
1247
1248 /* hp->happy_lock must be held */
1249 static void happy_meal_init_rings(struct happy_meal *hp)
1250 {
1251 struct hmeal_init_block *hb = hp->happy_block;
1252 int i;
1253
1254 HMD(("happy_meal_init_rings: counters to zero, "));
1255 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1256
1257 /* Free any skippy bufs left around in the rings. */
1258 HMD(("clean, "));
1259 happy_meal_clean_rings(hp);
1260
1261 /* Now get new skippy bufs for the receive ring. */
1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb;
1265 u32 mapping;
1266
1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1268 if (!skb) {
1269 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1270 continue;
1271 }
1272 hp->rx_skbs[i] = skb;
1273
1274 /* Because we reserve afterwards. */
1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1285 mapping);
1286 skb_reserve(skb, RX_OFFSET);
1287 }
1288
1289 HMD(("init txring, "));
1290 for (i = 0; i < TX_RING_SIZE; i++)
1291 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1292
1293 HMD(("done\n"));
1294 }
1295
1296 /* hp->happy_lock must be held */
1297 static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1298 void __iomem *tregs,
1299 struct ethtool_cmd *ep)
1300 {
1301 int timeout;
1302
1303 /* Read all of the registers we are interested in now. */
1304 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1305 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1306 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1307 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1308
1309 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
1310
1311 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1312 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
1313 /* Advertise everything we can support. */
1314 if (hp->sw_bmsr & BMSR_10HALF)
1315 hp->sw_advertise |= (ADVERTISE_10HALF);
1316 else
1317 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1318
1319 if (hp->sw_bmsr & BMSR_10FULL)
1320 hp->sw_advertise |= (ADVERTISE_10FULL);
1321 else
1322 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1323 if (hp->sw_bmsr & BMSR_100HALF)
1324 hp->sw_advertise |= (ADVERTISE_100HALF);
1325 else
1326 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1327 if (hp->sw_bmsr & BMSR_100FULL)
1328 hp->sw_advertise |= (ADVERTISE_100FULL);
1329 else
1330 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1331 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1332
1333 /* XXX Currently no Happy Meal cards I know of support 100BaseT4,
1334 * XXX and this is because the DP83840 does not support it, changes
1335 * XXX would need to be made to the tx/rx logic in the driver as well
1336 * XXX so I completely skip checking for it in the BMSR for now.
1337 */
1338
1339 #ifdef AUTO_SWITCH_DEBUG
1340 ASD(("%s: Advertising [ ", hp->dev->name));
1341 if (hp->sw_advertise & ADVERTISE_10HALF)
1342 ASD(("10H "));
1343 if (hp->sw_advertise & ADVERTISE_10FULL)
1344 ASD(("10F "));
1345 if (hp->sw_advertise & ADVERTISE_100HALF)
1346 ASD(("100H "));
1347 if (hp->sw_advertise & ADVERTISE_100FULL)
1348 ASD(("100F "));
1349 #endif
1350
1351 /* Enable Auto-Negotiation, this is usually on already... */
1352 hp->sw_bmcr |= BMCR_ANENABLE;
1353 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1354
1355 /* Restart it to make sure it is going. */
1356 hp->sw_bmcr |= BMCR_ANRESTART;
1357 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1358
1359 /* BMCR_ANRESTART self clears when the process has begun. */
1360
1361 timeout = 64; /* More than enough. */
1362 while (--timeout) {
1363 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1364 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1365 break; /* got it. */
1366 udelay(10);
1367 }
1368 if (!timeout) {
1369 printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
1370 "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
1371 printk(KERN_NOTICE "%s: Performing force link detection.\n",
1372 hp->dev->name);
1373 goto force_link;
1374 } else {
1375 hp->timer_state = arbwait;
1376 }
1377 } else {
1378 force_link:
1379 /* Force the link up, trying first a particular mode.
1380 * Either we are here at the request of ethtool or
1381 * because the Happy Meal would not start to autoneg.
1382 */
1383
1384 /* Disable auto-negotiation in BMCR, enable the duplex and
1385 * speed setting, init the timer state machine, and fire it off.
1386 */
1387 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
1388 hp->sw_bmcr = BMCR_SPEED100;
1389 } else {
1390 if (ethtool_cmd_speed(ep) == SPEED_100)
1391 hp->sw_bmcr = BMCR_SPEED100;
1392 else
1393 hp->sw_bmcr = 0;
1394 if (ep->duplex == DUPLEX_FULL)
1395 hp->sw_bmcr |= BMCR_FULLDPLX;
1396 }
1397 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1398
1399 if (!is_lucent_phy(hp)) {
1400 /* OK, it seems we need to disable the transceiver for the first
1401 * tick to make sure we get an accurate link state at the
1402 * second tick.
1403 */
1404 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1405 DP83840_CSCONFIG);
1406 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1407 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1408 hp->sw_csconfig);
1409 }
1410 hp->timer_state = ltrywait;
1411 }
1412
1413 hp->timer_ticks = 0;
1414 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
1415 hp->happy_timer.data = (unsigned long) hp;
1416 hp->happy_timer.function = happy_meal_timer;
1417 add_timer(&hp->happy_timer);
1418 }
1419
1420 /* hp->happy_lock must be held */
1421 static int happy_meal_init(struct happy_meal *hp)
1422 {
1423 void __iomem *gregs = hp->gregs;
1424 void __iomem *etxregs = hp->etxregs;
1425 void __iomem *erxregs = hp->erxregs;
1426 void __iomem *bregs = hp->bigmacregs;
1427 void __iomem *tregs = hp->tcvregs;
1428 u32 regtmp, rxcfg;
1429 unsigned char *e = &hp->dev->dev_addr[0];
1430
1431 /* If auto-negotiation timer is running, kill it. */
1432 del_timer(&hp->happy_timer);
1433
1434 HMD(("happy_meal_init: happy_flags[%08x] ",
1435 hp->happy_flags));
1436 if (!(hp->happy_flags & HFLAG_INIT)) {
1437 HMD(("set HFLAG_INIT, "));
1438 hp->happy_flags |= HFLAG_INIT;
1439 happy_meal_get_counters(hp, bregs);
1440 }
1441
1442 /* Stop polling. */
1443 HMD(("to happy_meal_poll_stop\n"));
1444 happy_meal_poll_stop(hp, tregs);
1445
1446 /* Stop transmitter and receiver. */
1447 HMD(("happy_meal_init: to happy_meal_stop\n"));
1448 happy_meal_stop(hp, gregs);
1449
1450 /* Alloc and reset the tx/rx descriptor chains. */
1451 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1452 happy_meal_init_rings(hp);
1453
1454 /* Shut up the MIF. */
1455 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1456 hme_read32(hp, tregs + TCVR_IMASK)));
1457 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1458
1459 /* See if we can enable the MIF frame on this card to speak to the DP83840. */
1460 if (hp->happy_flags & HFLAG_FENABLE) {
1461 HMD(("use frame old[%08x], ",
1462 hme_read32(hp, tregs + TCVR_CFG)));
1463 hme_write32(hp, tregs + TCVR_CFG,
1464 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1465 } else {
1466 HMD(("use bitbang old[%08x], ",
1467 hme_read32(hp, tregs + TCVR_CFG)));
1468 hme_write32(hp, tregs + TCVR_CFG,
1469 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1470 }
1471
1472 /* Check the state of the transceiver. */
1473 HMD(("to happy_meal_transceiver_check\n"));
1474 happy_meal_transceiver_check(hp, tregs);
1475
1476 /* Put the Big Mac into a sane state. */
1477 HMD(("happy_meal_init: "));
1478 switch(hp->tcvr_type) {
1479 case none:
1480 /* Cannot operate if we don't know the transceiver type! */
1481 HMD(("AAIEEE no transceiver type, EAGAIN"));
1482 return -EAGAIN;
1483
1484 case internal:
1485 /* Using the MII buffers. */
1486 HMD(("internal, using MII, "));
1487 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1488 break;
1489
1490 case external:
1491 /* Not using the MII, disable it. */
1492 HMD(("external, disable MII, "));
1493 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1494 break;
1495 }
1496
1497 if (happy_meal_tcvr_reset(hp, tregs))
1498 return -EAGAIN;
1499
1500 /* Reset the Happy Meal Big Mac transceiver and the receiver. */
1501 HMD(("tx/rx reset, "));
1502 happy_meal_tx_reset(hp, bregs);
1503 happy_meal_rx_reset(hp, bregs);
1504
1505 /* Set jam size and inter-packet gaps to reasonable defaults. */
1506 HMD(("jsize/ipg1/ipg2, "));
1507 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1508 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1509 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1510
1511 /* Load up the MAC address and random seed. */
1512 HMD(("rseed/macaddr, "));
1513
1514 /* The docs recommend using the 10 LSBs of our MAC here. */
1515 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1516
1517 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1518 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1519 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1520
1521 HMD(("htable, "));
1522 if ((hp->dev->flags & IFF_ALLMULTI) ||
1523 (netdev_mc_count(hp->dev) > 64)) {
1524 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1525 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1526 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1527 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1528 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
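/* Build the 64-bit multicast hash filter: the top 6 bits of the
 * little-endian CRC-32 of each address pick one of 64 filter bits,
 * spread across the four 16-bit BMAC_HTABLE registers (crc >> 4
 * selects the register, crc & 0xf the bit within it).
 */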
1529 u16 hash_table[4];
1530 struct netdev_hw_addr *ha;
1531 u32 crc;
1532
1533 memset(hash_table, 0, sizeof(hash_table));
1534 netdev_for_each_mc_addr(ha, hp->dev) {
1535 crc = ether_crc_le(6, ha->addr);
1536 crc >>= 26;
1537 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1538 }
1539 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1540 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1541 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1542 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1543 } else {
1544 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1545 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1546 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1547 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1548 }
1549
1550 /* Set the RX and TX ring ptrs. */
1551 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1552 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1553 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1554 hme_write32(hp, erxregs + ERX_RING,
1555 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1556 hme_write32(hp, etxregs + ETX_RING,
1557 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1558
1559 /* Parity issues in the ERX unit of some HME revisions can cause some
1560 * registers to not be written unless their parity is even. Detect such
1561 * lost writes and simply rewrite with a low bit set (which will be ignored
1562 * since the rxring needs to be 2K aligned).
1563 */
1564 if (hme_read32(hp, erxregs + ERX_RING) !=
1565 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1566 hme_write32(hp, erxregs + ERX_RING,
1567 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1568 | 0x4);
1569
1570 /* Set the supported burst sizes. */
1571 HMD(("happy_meal_init: old[%08x] bursts<",
1572 hme_read32(hp, gregs + GREG_CFG)));
1573
1574 #ifndef CONFIG_SPARC
1575 /* It is always PCI and can handle 64byte bursts. */
1576 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1577 #else
1578 if ((hp->happy_bursts & DMA_BURST64) &&
1579 ((hp->happy_flags & HFLAG_PCI) != 0
1580 #ifdef CONFIG_SBUS
1581 || sbus_can_burst64()
1582 #endif
1583 || 0)) {
1584 u32 gcfg = GREG_CFG_BURST64;
1585
1586 /* I have no idea if I should set the extended
1587 * transfer mode bit for Cheerio, so for now I
1588 * do not. -DaveM
1589 */
1590 #ifdef CONFIG_SBUS
1591 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1592 struct platform_device *op = hp->happy_dev;
1593 if (sbus_can_dma_64bit()) {
1594 sbus_set_sbus64(&op->dev,
1595 hp->happy_bursts);
1596 gcfg |= GREG_CFG_64BIT;
1597 }
1598 }
1599 #endif
1600
1601 HMD(("64>"));
1602 hme_write32(hp, gregs + GREG_CFG, gcfg);
1603 } else if (hp->happy_bursts & DMA_BURST32) {
1604 HMD(("32>"));
1605 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1606 } else if (hp->happy_bursts & DMA_BURST16) {
1607 HMD(("16>"));
1608 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1609 } else {
1610 HMD(("XXX>"));
1611 hme_write32(hp, gregs + GREG_CFG, 0);
1612 }
1613 #endif /* CONFIG_SPARC */
1614
1615 /* Turn off interrupts we do not want to hear. */
1616 HMD((", enable global interrupts, "));
1617 hme_write32(hp, gregs + GREG_IMASK,
1618 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1619 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1620
1621 /* Set the transmit ring buffer size. */
1622 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1623 hme_read32(hp, etxregs + ETX_RSIZE)));
1624 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1625
1626 /* Enable transmitter DVMA. */
1627 HMD(("tx dma enable old[%08x], ",
1628 hme_read32(hp, etxregs + ETX_CFG)));
1629 hme_write32(hp, etxregs + ETX_CFG,
1630 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1631
1632 /* This chip really rots: sometimes when you write to the
1633 * receiver's control registers, not all the bits get there
1634 * properly. I cannot think of a sane way to provide complete
1635 * coverage for this hardware bug yet.
1636 */
1637 HMD(("erx regs bug old[%08x]\n",
1638 hme_read32(hp, erxregs + ERX_CFG)));
1639 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1640 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1641 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1642 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1643 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1644 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1645 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1646 /* XXX Should return failure here... */
1647 }
1648
1649 /* Enable Big Mac hash table filter. */
1650 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1651 hme_read32(hp, bregs + BMAC_RXCFG)));
1652 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1653 if (hp->dev->flags & IFF_PROMISC)
1654 rxcfg |= BIGMAC_RXCFG_PMISC;
1655 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1656
1657 /* Let the bits settle in the chip. */
1658 udelay(10);
1659
1660 /* Ok, configure the Big Mac transmitter. */
1661 HMD(("BIGMAC init, "));
1662 regtmp = 0;
1663 if (hp->happy_flags & HFLAG_FULL)
1664 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1665
1666 /* Don't turn on the "don't give up" bit for now. It could cause hme
1667 * to deadlock with the PHY if a Jabber occurs.
1668 */
1669 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1670
1671 /* Give up after 16 TX attempts. */
1672 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1673
1674 /* Enable the output drivers no matter what. */
1675 regtmp = BIGMAC_XCFG_ODENABLE;
1676
1677 /* If card can do lance mode, enable it. */
1678 if (hp->happy_flags & HFLAG_LANCE)
1679 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1680
1681 /* Disable the MII buffers if using external transceiver. */
1682 if (hp->tcvr_type == external)
1683 regtmp |= BIGMAC_XCFG_MIIDISAB;
1684
1685 HMD(("XIF config old[%08x], ",
1686 hme_read32(hp, bregs + BMAC_XIFCFG)));
1687 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1688
1689 /* Start things up. */
1690 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1691 hme_read32(hp, bregs + BMAC_TXCFG),
1692 hme_read32(hp, bregs + BMAC_RXCFG)));
1693
1694 /* Set larger TX/RX size to allow for 802.1q */
1695 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1696 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
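/* ETH_FRAME_LEN + 8 leaves headroom beyond a plain Ethernet frame,
 * presumably the 4-byte 802.1Q tag plus the 4-byte FCS.
 */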
1697
1698 hme_write32(hp, bregs + BMAC_TXCFG,
1699 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1700 hme_write32(hp, bregs + BMAC_RXCFG,
1701 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1702
1703 /* Get the autonegotiation started, and the watch timer ticking. */
1704 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1705
1706 /* Success. */
1707 return 0;
1708 }
1709
1710 /* hp->happy_lock must be held */
1711 static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1712 {
1713 void __iomem *tregs = hp->tcvregs;
1714 void __iomem *bregs = hp->bigmacregs;
1715 void __iomem *gregs = hp->gregs;
1716
1717 happy_meal_stop(hp, gregs);
1718 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1719 if (hp->happy_flags & HFLAG_FENABLE)
1720 hme_write32(hp, tregs + TCVR_CFG,
1721 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1722 else
1723 hme_write32(hp, tregs + TCVR_CFG,
1724 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1725 happy_meal_transceiver_check(hp, tregs);
1726 switch(hp->tcvr_type) {
1727 case none:
1728 return;
1729 case internal:
1730 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1731 break;
1732 case external:
1733 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1734 break;
1735 }
1736 if (happy_meal_tcvr_reset(hp, tregs))
1737 return;
1738
1739 /* Latch PHY registers as of now. */
1740 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1741 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1742
1743 /* Advertise everything we can support. */
1744 if (hp->sw_bmsr & BMSR_10HALF)
1745 hp->sw_advertise |= (ADVERTISE_10HALF);
1746 else
1747 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1748
1749 if (hp->sw_bmsr & BMSR_10FULL)
1750 hp->sw_advertise |= (ADVERTISE_10FULL);
1751 else
1752 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1753 if (hp->sw_bmsr & BMSR_100HALF)
1754 hp->sw_advertise |= (ADVERTISE_100HALF);
1755 else
1756 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1757 if (hp->sw_bmsr & BMSR_100FULL)
1758 hp->sw_advertise |= (ADVERTISE_100FULL);
1759 else
1760 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1761
1762 /* Update the PHY advertisement register. */
1763 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1764 }
1765
1766 /* Once status is latched (by happy_meal_interrupt) it is cleared by
1767 * the hardware, so we cannot re-read it and get a correct value.
1768 *
1769 * hp->happy_lock must be held
1770 */
1771 static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1772 {
1773 int reset = 0;
1774
1775 /* Only print messages for non-counter related interrupts. */
1776 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1777 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1778 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1779 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1780 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1781 GREG_STAT_SLVPERR))
1782 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1783 hp->dev->name, status);
1784
1785 if (status & GREG_STAT_RFIFOVF) {
1786 /* Receive FIFO overflow is harmless and the hardware will take
1787 care of it, just some packets are lost. Who cares. */
1788 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1789 }
1790
1791 if (status & GREG_STAT_STSTERR) {
1792 /* BigMAC SQE link test failed. */
1793 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1794 reset = 1;
1795 }
1796
1797 if (status & GREG_STAT_TFIFO_UND) {
1798 /* Transmit FIFO underrun, again DMA error likely. */
1799 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1800 hp->dev->name);
1801 reset = 1;
1802 }
1803
1804 if (status & GREG_STAT_MAXPKTERR) {
1805 /* Driver error, tried to transmit something larger
1806 * than the Ethernet MTU.
1807 */
1808 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1809 reset = 1;
1810 }
1811
1812 if (status & GREG_STAT_NORXD) {
1813 /* This is harmless, it just means the system is
1814 * quite loaded and the incoming packet rate was
1815 * faster than the interrupt handler could keep up
1816 * with.
1817 */
1818 printk(KERN_INFO "%s: Happy Meal out of receive "
1819 "descriptors, packet dropped.\n",
1820 hp->dev->name);
1821 }
1822
1823 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1824 /* All sorts of DMA receive errors. */
1825 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1826 if (status & GREG_STAT_RXERR)
1827 printk("GenericError ");
1828 if (status & GREG_STAT_RXPERR)
1829 printk("ParityError ");
1830 if (status & GREG_STAT_RXTERR)
1831 printk("RxTagBotch ");
1832 printk("]\n");
1833 reset = 1;
1834 }
1835
1836 if (status & GREG_STAT_EOPERR) {
1837 /* Driver bug, didn't set EOP bit in tx descriptor given
1838 * to the happy meal.
1839 */
1840 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1841 hp->dev->name);
1842 reset = 1;
1843 }
1844
1845 if (status & GREG_STAT_MIFIRQ) {
1846 /* MIF signalled an interrupt, were we polling it? */
1847 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1848 }
1849
1850 if (status &
1851 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1852 /* All sorts of transmit DMA errors. */
1853 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1854 if (status & GREG_STAT_TXEACK)
1855 printk("GenericError ");
1856 if (status & GREG_STAT_TXLERR)
1857 printk("LateError ");
1858 if (status & GREG_STAT_TXPERR)
1859 printk("ParityErro ");
1860 if (status & GREG_STAT_TXTERR)
1861 printk("TagBotch ");
1862 printk("]\n");
1863 reset = 1;
1864 }
1865
1866 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1867 /* Bus or parity error when cpu accessed happy meal registers
1868 * or its internal FIFOs. We should never see this.
1869 */
1870 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1871 hp->dev->name,
1872 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1873 reset = 1;
1874 }
1875
1876 if (reset) {
1877 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1878 happy_meal_init(hp);
1879 return 1;
1880 }
1881 return 0;
1882 }
1883
1884 /* hp->happy_lock must be held */
1885 static void happy_meal_mif_interrupt(struct happy_meal *hp)
1886 {
1887 void __iomem *tregs = hp->tcvregs;
1888
1889 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1890 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1891 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1892
1893 /* Use the fastest transmission protocol possible. */
1894 if (hp->sw_lpa & LPA_100FULL) {
1895 printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
1896 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
1897 } else if (hp->sw_lpa & LPA_100HALF) {
1898 printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
1899 hp->sw_bmcr |= BMCR_SPEED100;
1900 } else if (hp->sw_lpa & LPA_10FULL) {
1901 printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
1902 hp->sw_bmcr |= BMCR_FULLDPLX;
1903 } else {
1904 printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
1905 }
1906 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1907
1908 /* Finally stop polling and shut up the MIF. */
1909 happy_meal_poll_stop(hp, tregs);
1910 }
1911
1912 #ifdef TXDEBUG
1913 #define TXD(x) printk x
1914 #else
1915 #define TXD(x)
1916 #endif
1917
1918 /* hp->happy_lock must be held */
1919 static void happy_meal_tx(struct happy_meal *hp)
1920 {
1921 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1922 struct happy_meal_txd *this;
1923 struct net_device *dev = hp->dev;
1924 int elem;
1925
1926 elem = hp->tx_old;
1927 TXD(("TX<"));
1928 while (elem != hp->tx_new) {
1929 struct sk_buff *skb;
1930 u32 flags, dma_addr, dma_len;
1931 int frag;
1932
1933 TXD(("[%d]", elem));
1934 this = &txbase[elem];
1935 flags = hme_read_desc32(hp, &this->tx_flags);
1936 if (flags & TXFLAG_OWN)
1937 break;
1938 skb = hp->tx_skbs[elem];
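/* A fragmented skb occupies several consecutive descriptors; it is only
 * reclaimed once the device has given back the last descriptor of the
 * chain, otherwise we could unmap buffers the chip is still reading.
 */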
1939 if (skb_shinfo(skb)->nr_frags) {
1940 int last;
1941
1942 last = elem + skb_shinfo(skb)->nr_frags;
1943 last &= (TX_RING_SIZE - 1);
1944 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1945 if (flags & TXFLAG_OWN)
1946 break;
1947 }
1948 hp->tx_skbs[elem] = NULL;
1949 hp->net_stats.tx_bytes += skb->len;
1950
1951 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1952 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1953 dma_len = hme_read_desc32(hp, &this->tx_flags);
1954
1955 dma_len &= TXFLAG_SIZE;
1956 if (!frag)
1957 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1958 else
1959 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1960
1961 elem = NEXT_TX(elem);
1962 this = &txbase[elem];
1963 }
1964
1965 dev_kfree_skb_irq(skb);
1966 hp->net_stats.tx_packets++;
1967 }
1968 hp->tx_old = elem;
1969 TXD((">"));
1970
1971 if (netif_queue_stopped(dev) &&
1972 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1973 netif_wake_queue(dev);
1974 }
1975
1976 #ifdef RXDEBUG
1977 #define RXD(x) printk x
1978 #else
1979 #define RXD(x)
1980 #endif
1981
1982 /* Originally I used to handle the allocation failure by just giving back
1983 * that one ring buffer to the happy meal. Problem is that usually when that
1984 * condition is triggered, the happy meal expects you to do something reasonable
1985 * with all of the packets it has DMA'd in. So now I just drop the entire
1986 * ring when we cannot get a new skb and give them all back to the happy meal,
1987 * maybe things will be "happier" now.
1988 *
1989 * hp->happy_lock must be held
1990 */
1991 static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1992 {
1993 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1994 struct happy_meal_rxd *this;
1995 int elem = hp->rx_new, drops = 0;
1996 u32 flags;
1997
1998 RXD(("RX<"));
1999 this = &rxbase[elem];
2000 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
2001 struct sk_buff *skb;
2002 int len = flags >> 16;
2003 u16 csum = flags & RXFLAG_CSUM;
2004 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
2005
2006 RXD(("[%d ", elem));
2007
2008 /* Check for errors. */
2009 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
2010 RXD(("ERR(%08x)]", flags));
2011 hp->net_stats.rx_errors++;
2012 if (len < ETH_ZLEN)
2013 hp->net_stats.rx_length_errors++;
2014 if (len & (RXFLAG_OVERFLOW >> 16)) {
2015 hp->net_stats.rx_over_errors++;
2016 hp->net_stats.rx_fifo_errors++;
2017 }
2018
2019 /* Return it to the Happy meal. */
2020 drop_it:
2021 hp->net_stats.rx_dropped++;
2022 hme_write_rxd(hp, this,
2023 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2024 dma_addr);
2025 goto next;
2026 }
2027 skb = hp->rx_skbs[elem];
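/* Copy-break: frames above RX_COPY_THRESHOLD keep their big ring buffer
 * and the slot is refilled with a fresh allocation; smaller frames are
 * copied into a new skb so the original buffer can go straight back to
 * the chip.
 */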
2028 if (len > RX_COPY_THRESHOLD) {
2029 struct sk_buff *new_skb;
2030 u32 mapping;
2031
2032 /* Now refill the entry, if we can. */
2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
2034 if (new_skb == NULL) {
2035 drops++;
2036 goto drop_it;
2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2049 hp->rx_skbs[elem] = new_skb;
2050 hme_write_rxd(hp, this,
2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2052 mapping);
2053 skb_reserve(new_skb, RX_OFFSET);
2054
2055 /* Trim the original skb for the netif. */
2056 skb_trim(skb, len);
2057 } else {
2058 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
2059
2060 if (copy_skb == NULL) {
2061 drops++;
2062 goto drop_it;
2063 }
2064
2065 skb_reserve(copy_skb, 2);
2066 skb_put(copy_skb, len);
2067 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2068 skb_copy_from_linear_data(skb, copy_skb->data, len);
2069 dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2070 /* Reuse original ring buffer. */
2071 hme_write_rxd(hp, this,
2072 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2073 dma_addr);
2074
2075 skb = copy_skb;
2076 }
2077
2078 /* This card is _fucking_ hot... */
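/* The RXFLAG_CSUM field appears to be a 16-bit checksum of the received
 * data computed by the chip; the conversion below folds it into the form
 * the stack expects for CHECKSUM_COMPLETE (hardware semantics assumed
 * from this code, not from documentation).
 */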
2079 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
2080 skb->ip_summed = CHECKSUM_COMPLETE;
2081
2082 RXD(("len=%d csum=%4x]", len, csum));
2083 skb->protocol = eth_type_trans(skb, dev);
2084 netif_rx(skb);
2085
2086 hp->net_stats.rx_packets++;
2087 hp->net_stats.rx_bytes += len;
2088 next:
2089 elem = NEXT_RX(elem);
2090 this = &rxbase[elem];
2091 }
2092 hp->rx_new = elem;
2093 if (drops)
2094 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
2095 RXD((">"));
2096 }
2097
2098 static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
2099 {
2100 struct net_device *dev = dev_id;
2101 struct happy_meal *hp = netdev_priv(dev);
2102 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2103
2104 HMD(("happy_meal_interrupt: status=%08x ", happy_status));
2105
2106 spin_lock(&hp->happy_lock);
2107
2108 if (happy_status & GREG_STAT_ERRORS) {
2109 HMD(("ERRORS "));
2110 if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
2111 goto out;
2112 }
2113
2114 if (happy_status & GREG_STAT_MIFIRQ) {
2115 HMD(("MIFIRQ "));
2116 happy_meal_mif_interrupt(hp);
2117 }
2118
2119 if (happy_status & GREG_STAT_TXALL) {
2120 HMD(("TXALL "));
2121 happy_meal_tx(hp);
2122 }
2123
2124 if (happy_status & GREG_STAT_RXTOHOST) {
2125 HMD(("RXTOHOST "));
2126 happy_meal_rx(hp, dev);
2127 }
2128
2129 HMD(("done\n"));
2130 out:
2131 spin_unlock(&hp->happy_lock);
2132
2133 return IRQ_HANDLED;
2134 }
2135
2136 #ifdef CONFIG_SBUS
2137 static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2138 {
2139 struct quattro *qp = (struct quattro *) cookie;
2140 int i;
2141
2142 for (i = 0; i < 4; i++) {
2143 struct net_device *dev = qp->happy_meals[i];
2144 struct happy_meal *hp = netdev_priv(dev);
2145 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2146
2147 HMD(("quattro_interrupt: status=%08x ", happy_status));
2148
2149 if (!(happy_status & (GREG_STAT_ERRORS |
2150 GREG_STAT_MIFIRQ |
2151 GREG_STAT_TXALL |
2152 GREG_STAT_RXTOHOST)))
2153 continue;
2154
2155 spin_lock(&hp->happy_lock);
2156
2157 if (happy_status & GREG_STAT_ERRORS) {
2158 HMD(("ERRORS "));
2159 if (happy_meal_is_not_so_happy(hp, happy_status))
2160 goto next;
2161 }
2162
2163 if (happy_status & GREG_STAT_MIFIRQ) {
2164 HMD(("MIFIRQ "));
2165 happy_meal_mif_interrupt(hp);
2166 }
2167
2168 if (happy_status & GREG_STAT_TXALL) {
2169 HMD(("TXALL "));
2170 happy_meal_tx(hp);
2171 }
2172
2173 if (happy_status & GREG_STAT_RXTOHOST) {
2174 HMD(("RXTOHOST "));
2175 happy_meal_rx(hp, dev);
2176 }
2177
2178 next:
2179 spin_unlock(&hp->happy_lock);
2180 }
2181 HMD(("done\n"));
2182
2183 return IRQ_HANDLED;
2184 }
2185 #endif
2186
2187 static int happy_meal_open(struct net_device *dev)
2188 {
2189 struct happy_meal *hp = netdev_priv(dev);
2190 int res;
2191
2192 HMD(("happy_meal_open: "));
2193
2194 /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
2195 * into a single source whose handler we register at probe time.
2196 */
2197 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2198 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2199 dev->name, dev);
2200 if (res) {
2201 HMD(("EAGAIN\n"));
2202 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2203 hp->irq);
2204
2205 return -EAGAIN;
2206 }
2207 }
2208
2209 HMD(("to happy_meal_init\n"));
2210
2211 spin_lock_irq(&hp->happy_lock);
2212 res = happy_meal_init(hp);
2213 spin_unlock_irq(&hp->happy_lock);
2214
2215 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2216 free_irq(hp->irq, dev);
2217 return res;
2218 }
2219
2220 static int happy_meal_close(struct net_device *dev)
2221 {
2222 struct happy_meal *hp = netdev_priv(dev);
2223
2224 spin_lock_irq(&hp->happy_lock);
2225 happy_meal_stop(hp, hp->gregs);
2226 happy_meal_clean_rings(hp);
2227
2228 /* If auto-negotiation timer is running, kill it. */
2229 del_timer(&hp->happy_timer);
2230
2231 spin_unlock_irq(&hp->happy_lock);
2232
2233 /* On Quattro QFE cards, all hme interrupts are concentrated
2234 * into a single source whose handler we register at probe
2235 * time and never unregister.
2236 */
2237 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2238 free_irq(hp->irq, dev);
2239
2240 return 0;
2241 }
2242
2243 #ifdef SXDEBUG
2244 #define SXD(x) printk x
2245 #else
2246 #define SXD(x)
2247 #endif
2248
2249 static void happy_meal_tx_timeout(struct net_device *dev)
2250 {
2251 struct happy_meal *hp = netdev_priv(dev);
2252
2253 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2254 tx_dump_log();
2255 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2256 hme_read32(hp, hp->gregs + GREG_STAT),
2257 hme_read32(hp, hp->etxregs + ETX_CFG),
2258 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2259
2260 spin_lock_irq(&hp->happy_lock);
2261 happy_meal_init(hp);
2262 spin_unlock_irq(&hp->happy_lock);
2263
2264 netif_wake_queue(dev);
2265 }
2266
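/* Undo the DMA mappings of a partially mapped TX skb: unmap the linear
 * head, then walk the descriptors written so far and unmap each fragment
 * page. Used when a later skb_frag_dma_map() fails in
 * happy_meal_start_xmit().
 */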
2267 static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269 {
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
2282 dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
first_entry = NEXT_TX(first_entry);
2283 }
2284 }
2285
2286 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2287 struct net_device *dev)
2288 {
2289 struct happy_meal *hp = netdev_priv(dev);
2290 int entry;
2291 u32 tx_flags;
2292
2293 tx_flags = TXFLAG_OWN;
2294 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2295 const u32 csum_start_off = skb_checksum_start_offset(skb);
2296 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2297
2298 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2299 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2300 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2301 }
2302
2303 spin_lock_irq(&hp->happy_lock);
2304
2305 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2306 netif_stop_queue(dev);
2307 spin_unlock_irq(&hp->happy_lock);
2308 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
2309 dev->name);
2310 return NETDEV_TX_BUSY;
2311 }
2312
2313 entry = hp->tx_new;
2314 SXD(("SX<l[%d]e[%d]>", skb->len, entry));
2315 hp->tx_skbs[entry] = skb;
2316
2317 if (skb_shinfo(skb)->nr_frags == 0) {
2318 u32 mapping, len;
2319
2320 len = skb->len;
2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2326 (tx_flags | (len & TXFLAG_SIZE)),
2327 mapping);
2328 entry = NEXT_TX(entry);
2329 } else {
2330 u32 first_len, first_mapping;
2331 int frag, first_entry = entry;
2332
2333 /* We must give this initial chunk to the device last.
2334 * Otherwise we could race with the device.
2335 */
2336 first_len = skb_headlen(skb);
2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2341 entry = NEXT_TX(entry);
2342
2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2344 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2345 u32 len, mapping, this_txflags;
2346
2347 len = skb_frag_size(this_frag);
2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2355 this_txflags = tx_flags;
2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2357 this_txflags |= TXFLAG_EOP;
2358 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2359 (this_txflags | (len & TXFLAG_SIZE)),
2360 mapping);
2361 entry = NEXT_TX(entry);
2362 }
2363 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2364 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2365 first_mapping);
2366 }
2367
2368 hp->tx_new = entry;
2369
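/* Stop the queue once fewer than MAX_SKB_FRAGS + 1 descriptors remain so
 * that a maximally fragmented skb always fits; happy_meal_tx() wakes the
 * queue again when enough descriptors have been reclaimed.
 */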
2370 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2371 netif_stop_queue(dev);
2372
2373 /* Get it going. */
2374 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2375
2376 spin_unlock_irq(&hp->happy_lock);
2377
2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2379 return NETDEV_TX_OK;
2380
2381 out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2388 }
2389
2390 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2391 {
2392 struct happy_meal *hp = netdev_priv(dev);
2393
2394 spin_lock_irq(&hp->happy_lock);
2395 happy_meal_get_counters(hp, hp->bigmacregs);
2396 spin_unlock_irq(&hp->happy_lock);
2397
2398 return &hp->net_stats;
2399 }
2400
2401 static void happy_meal_set_multicast(struct net_device *dev)
2402 {
2403 struct happy_meal *hp = netdev_priv(dev);
2404 void __iomem *bregs = hp->bigmacregs;
2405 struct netdev_hw_addr *ha;
2406 u32 crc;
2407
2408 spin_lock_irq(&hp->happy_lock);
2409
2410 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2411 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2412 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2413 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2414 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2415 } else if (dev->flags & IFF_PROMISC) {
2416 hme_write32(hp, bregs + BMAC_RXCFG,
2417 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2418 } else {
2419 u16 hash_table[4];
2420
2421 memset(hash_table, 0, sizeof(hash_table));
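/* The BigMAC multicast filter is a 64-bit hash table spread across four
 * 16-bit registers. The top six bits of the little-endian CRC-32 of each
 * address pick one of the 64 bits, roughly:
 *
 *   bit  = ether_crc_le(6, addr) >> 26;   // 0..63
 *   reg  = bit >> 4;                      // HTABLE0..HTABLE3
 *   mask = 1 << (bit & 0xf);
 *
 * (Sketch of the loop below; the hardware hashing rule itself is assumed.)
 */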
2422 netdev_for_each_mc_addr(ha, dev) {
2423 crc = ether_crc_le(6, ha->addr);
2424 crc >>= 26;
2425 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2426 }
2427 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2428 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2429 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2430 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2431 }
2432
2433 spin_unlock_irq(&hp->happy_lock);
2434 }
2435
2436 /* Ethtool support... */
2437 static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2438 {
2439 struct happy_meal *hp = netdev_priv(dev);
2440 u32 speed;
2441
2442 cmd->supported =
2443 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2444 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2445 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2446
2447 /* XXX hardcoded stuff for now */
2448 cmd->port = PORT_TP; /* XXX no MII support */
2449 cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
2450 cmd->phy_address = 0; /* XXX fixed PHYAD */
2451
2452 /* Record PHY settings. */
2453 spin_lock_irq(&hp->happy_lock);
2454 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2455 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2456 spin_unlock_irq(&hp->happy_lock);
2457
2458 if (hp->sw_bmcr & BMCR_ANENABLE) {
2459 cmd->autoneg = AUTONEG_ENABLE;
2460 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2461 SPEED_100 : SPEED_10);
2462 if (speed == SPEED_100)
2463 cmd->duplex =
2464 (hp->sw_lpa & (LPA_100FULL)) ?
2465 DUPLEX_FULL : DUPLEX_HALF;
2466 else
2467 cmd->duplex =
2468 (hp->sw_lpa & (LPA_10FULL)) ?
2469 DUPLEX_FULL : DUPLEX_HALF;
2470 } else {
2471 cmd->autoneg = AUTONEG_DISABLE;
2472 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2473 cmd->duplex =
2474 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2475 DUPLEX_FULL : DUPLEX_HALF;
2476 }
2477 ethtool_cmd_speed_set(cmd, speed);
2478 return 0;
2479 }
2480
2481 static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2482 {
2483 struct happy_meal *hp = netdev_priv(dev);
2484
2485 /* Verify the settings we care about. */
2486 if (cmd->autoneg != AUTONEG_ENABLE &&
2487 cmd->autoneg != AUTONEG_DISABLE)
2488 return -EINVAL;
2489 if (cmd->autoneg == AUTONEG_DISABLE &&
2490 ((ethtool_cmd_speed(cmd) != SPEED_100 &&
2491 ethtool_cmd_speed(cmd) != SPEED_10) ||
2492 (cmd->duplex != DUPLEX_HALF &&
2493 cmd->duplex != DUPLEX_FULL)))
2494 return -EINVAL;
2495
2496 /* Ok, do it to it. */
2497 spin_lock_irq(&hp->happy_lock);
2498 del_timer(&hp->happy_timer);
2499 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2500 spin_unlock_irq(&hp->happy_lock);
2501
2502 return 0;
2503 }
2504
2505 static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2506 {
2507 struct happy_meal *hp = netdev_priv(dev);
2508
2509 strlcpy(info->driver, "sunhme", sizeof(info->driver));
2510 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2511 if (hp->happy_flags & HFLAG_PCI) {
2512 struct pci_dev *pdev = hp->happy_dev;
2513 strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2514 }
2515 #ifdef CONFIG_SBUS
2516 else {
2517 const struct linux_prom_registers *regs;
2518 struct platform_device *op = hp->happy_dev;
2519 regs = of_get_property(op->dev.of_node, "regs", NULL);
2520 if (regs)
2521 snprintf(info->bus_info, sizeof(info->bus_info),
2522 "SBUS:%d",
2523 regs->which_io);
2524 }
2525 #endif
2526 }
2527
2528 static u32 hme_get_link(struct net_device *dev)
2529 {
2530 struct happy_meal *hp = netdev_priv(dev);
2531
2532 spin_lock_irq(&hp->happy_lock);
2533 hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2534 spin_unlock_irq(&hp->happy_lock);
2535
2536 return hp->sw_bmsr & BMSR_LSTATUS;
2537 }
2538
2539 static const struct ethtool_ops hme_ethtool_ops = {
2540 .get_settings = hme_get_settings,
2541 .set_settings = hme_set_settings,
2542 .get_drvinfo = hme_get_drvinfo,
2543 .get_link = hme_get_link,
2544 };
2545
2546 static int hme_version_printed;
2547
2548 #ifdef CONFIG_SBUS
2549 /* Given a happy meal sbus device, find its quattro parent.
2550 * If none exist, allocate and return a new one.
2551 *
2552 * Return NULL on failure.
2553 */
2554 static struct quattro *quattro_sbus_find(struct platform_device *child)
2555 {
2556 struct device *parent = child->dev.parent;
2557 struct platform_device *op;
2558 struct quattro *qp;
2559
2560 op = to_platform_device(parent);
2561 qp = platform_get_drvdata(op);
2562 if (qp)
2563 return qp;
2564
2565 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2566 if (qp != NULL) {
2567 int i;
2568
2569 for (i = 0; i < 4; i++)
2570 qp->happy_meals[i] = NULL;
2571
2572 qp->quattro_dev = child;
2573 qp->next = qfe_sbus_list;
2574 qfe_sbus_list = qp;
2575
2576 platform_set_drvdata(op, qp);
2577 }
2578 return qp;
2579 }
2580
2581 /* After all quattro cards have been probed, we call these functions
2582 * to register the shared IRQ for the cards whose four ports all
2583 * probed successfully, and to skip the cards that did not.
2584 */
2585 static int __init quattro_sbus_register_irqs(void)
2586 {
2587 struct quattro *qp;
2588
2589 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2590 struct platform_device *op = qp->quattro_dev;
2591 int err, qfe_slot, skip = 0;
2592
2593 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2594 if (!qp->happy_meals[qfe_slot])
2595 skip = 1;
2596 }
2597 if (skip)
2598 continue;
2599
2600 err = request_irq(op->archdata.irqs[0],
2601 quattro_sbus_interrupt,
2602 IRQF_SHARED, "Quattro",
2603 qp);
2604 if (err != 0) {
2605 printk(KERN_ERR "Quattro HME: IRQ registration "
2606 "error %d.\n", err);
2607 return err;
2608 }
2609 }
2610
2611 return 0;
2612 }
2613
2614 static void quattro_sbus_free_irqs(void)
2615 {
2616 struct quattro *qp;
2617
2618 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2619 struct platform_device *op = qp->quattro_dev;
2620 int qfe_slot, skip = 0;
2621
2622 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2623 if (!qp->happy_meals[qfe_slot])
2624 skip = 1;
2625 }
2626 if (skip)
2627 continue;
2628
2629 free_irq(op->archdata.irqs[0], qp);
2630 }
2631 }
2632 #endif /* CONFIG_SBUS */
2633
2634 #ifdef CONFIG_PCI
2635 static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2636 {
2637 struct pci_dev *bdev = pdev->bus->self;
2638 struct quattro *qp;
2639
2640 if (!bdev) return NULL;
2641 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2642 struct pci_dev *qpdev = qp->quattro_dev;
2643
2644 if (qpdev == bdev)
2645 return qp;
2646 }
2647 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2648 if (qp != NULL) {
2649 int i;
2650
2651 for (i = 0; i < 4; i++)
2652 qp->happy_meals[i] = NULL;
2653
2654 qp->quattro_dev = bdev;
2655 qp->next = qfe_pci_list;
2656 qfe_pci_list = qp;
2657
2658 /* No range tricks necessary on PCI. */
2659 qp->nranges = 0;
2660 }
2661 return qp;
2662 }
2663 #endif /* CONFIG_PCI */
2664
2665 static const struct net_device_ops hme_netdev_ops = {
2666 .ndo_open = happy_meal_open,
2667 .ndo_stop = happy_meal_close,
2668 .ndo_start_xmit = happy_meal_start_xmit,
2669 .ndo_tx_timeout = happy_meal_tx_timeout,
2670 .ndo_get_stats = happy_meal_get_stats,
2671 .ndo_set_rx_mode = happy_meal_set_multicast,
2672 .ndo_change_mtu = eth_change_mtu,
2673 .ndo_set_mac_address = eth_mac_addr,
2674 .ndo_validate_addr = eth_validate_addr,
2675 };
2676
2677 #ifdef CONFIG_SBUS
2678 static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2679 {
2680 struct device_node *dp = op->dev.of_node, *sbus_dp;
2681 struct quattro *qp = NULL;
2682 struct happy_meal *hp;
2683 struct net_device *dev;
2684 int i, qfe_slot = -1;
2685 int err = -ENODEV;
2686
2687 sbus_dp = op->dev.parent->of_node;
2688
2689 /* We can match PCI devices too, do not accept those here. */
2690 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2691 return err;
2692
2693 if (is_qfe) {
2694 qp = quattro_sbus_find(op);
2695 if (qp == NULL)
2696 goto err_out;
2697 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2698 if (qp->happy_meals[qfe_slot] == NULL)
2699 break;
2700 if (qfe_slot == 4)
2701 goto err_out;
2702 }
2703
2704 err = -ENOMEM;
2705 dev = alloc_etherdev(sizeof(struct happy_meal));
2706 if (!dev)
2707 goto err_out;
2708 SET_NETDEV_DEV(dev, &op->dev);
2709
2710 if (hme_version_printed++ == 0)
2711 printk(KERN_INFO "%s", version);
2712
2713 /* If the user did not specify a MAC address, use
2714 * the Quattro local-mac-address property...
2715 */
2716 for (i = 0; i < 6; i++) {
2717 if (macaddr[i] != 0)
2718 break;
2719 }
2720 if (i < 6) { /* a mac address was given */
2721 for (i = 0; i < 6; i++)
2722 dev->dev_addr[i] = macaddr[i];
2723 macaddr[5]++;
2724 } else {
2725 const unsigned char *addr;
2726 int len;
2727
2728 addr = of_get_property(dp, "local-mac-address", &len);
2729
2730 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2731 memcpy(dev->dev_addr, addr, ETH_ALEN);
2732 else
2733 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
2734 }
2735
2736 hp = netdev_priv(dev);
2737
2738 hp->happy_dev = op;
2739 hp->dma_dev = &op->dev;
2740
2741 spin_lock_init(&hp->happy_lock);
2742
2743 err = -ENODEV;
2744 if (qp != NULL) {
2745 hp->qfe_parent = qp;
2746 hp->qfe_ent = qfe_slot;
2747 qp->happy_meals[qfe_slot] = dev;
2748 }
2749
2750 hp->gregs = of_ioremap(&op->resource[0], 0,
2751 GREG_REG_SIZE, "HME Global Regs");
2752 if (!hp->gregs) {
2753 printk(KERN_ERR "happymeal: Cannot map global registers.\n");
2754 goto err_out_free_netdev;
2755 }
2756
2757 hp->etxregs = of_ioremap(&op->resource[1], 0,
2758 ETX_REG_SIZE, "HME TX Regs");
2759 if (!hp->etxregs) {
2760 printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
2761 goto err_out_iounmap;
2762 }
2763
2764 hp->erxregs = of_ioremap(&op->resource[2], 0,
2765 ERX_REG_SIZE, "HME RX Regs");
2766 if (!hp->erxregs) {
2767 printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
2768 goto err_out_iounmap;
2769 }
2770
2771 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2772 BMAC_REG_SIZE, "HME BIGMAC Regs");
2773 if (!hp->bigmacregs) {
2774 printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
2775 goto err_out_iounmap;
2776 }
2777
2778 hp->tcvregs = of_ioremap(&op->resource[4], 0,
2779 TCVR_REG_SIZE, "HME Tranceiver Regs");
2780 if (!hp->tcvregs) {
2781 printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
2782 goto err_out_iounmap;
2783 }
2784
2785 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2786 if (hp->hm_revision == 0xff)
2787 hp->hm_revision = 0xa0;
2788
2789 /* Now enable the feature flags we can. */
2790 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2791 hp->happy_flags = HFLAG_20_21;
2792 else if (hp->hm_revision != 0xa0)
2793 hp->happy_flags = HFLAG_NOT_A0;
2794
2795 if (qp != NULL)
2796 hp->happy_flags |= HFLAG_QUATTRO;
2797
2798 /* Get the supported DVMA burst sizes from our Happy SBUS. */
2799 hp->happy_bursts = of_getintprop_default(sbus_dp,
2800 "burst-sizes", 0x00);
2801
2802 hp->happy_block = dma_alloc_coherent(hp->dma_dev,
2803 PAGE_SIZE,
2804 &hp->hblock_dvma,
2805 GFP_ATOMIC);
2806 err = -ENOMEM;
2807 if (!hp->happy_block)
2808 goto err_out_iounmap;
2809
2810 /* Force check of the link first time we are brought up. */
2811 hp->linkcheck = 0;
2812
2813 /* Force timer state to 'asleep' with count of zero. */
2814 hp->timer_state = asleep;
2815 hp->timer_ticks = 0;
2816
2817 init_timer(&hp->happy_timer);
2818
2819 hp->dev = dev;
2820 dev->netdev_ops = &hme_netdev_ops;
2821 dev->watchdog_timeo = 5*HZ;
2822 dev->ethtool_ops = &hme_ethtool_ops;
2823
2824 /* Happy Meal can do it all... */
2825 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2826 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2827
2828 hp->irq = op->archdata.irqs[0];
2829
2830 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2831 /* Hook up SBUS register/descriptor accessors. */
2832 hp->read_desc32 = sbus_hme_read_desc32;
2833 hp->write_txd = sbus_hme_write_txd;
2834 hp->write_rxd = sbus_hme_write_rxd;
2835 hp->read32 = sbus_hme_read32;
2836 hp->write32 = sbus_hme_write32;
2837 #endif
2838
2839 /* Grrr, Happy Meal comes up by default not advertising
2840 * full duplex 100baseT capabilities, fix this.
2841 */
2842 spin_lock_irq(&hp->happy_lock);
2843 happy_meal_set_initial_advertisement(hp);
2844 spin_unlock_irq(&hp->happy_lock);
2845
2846 err = register_netdev(hp->dev);
2847 if (err) {
2848 printk(KERN_ERR "happymeal: Cannot register net device, "
2849 "aborting.\n");
2850 goto err_out_free_coherent;
2851 }
2852
2853 platform_set_drvdata(op, hp);
2854
2855 if (qfe_slot != -1)
2856 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
2857 dev->name, qfe_slot);
2858 else
2859 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2860 dev->name);
2861
2862 printk("%pM\n", dev->dev_addr);
2863
2864 return 0;
2865
2866 err_out_free_coherent:
2867 dma_free_coherent(hp->dma_dev,
2868 PAGE_SIZE,
2869 hp->happy_block,
2870 hp->hblock_dvma);
2871
2872 err_out_iounmap:
2873 if (hp->gregs)
2874 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2875 if (hp->etxregs)
2876 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2877 if (hp->erxregs)
2878 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2879 if (hp->bigmacregs)
2880 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2881 if (hp->tcvregs)
2882 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2883
2884 if (qp)
2885 qp->happy_meals[qfe_slot] = NULL;
2886
2887 err_out_free_netdev:
2888 free_netdev(dev);
2889
2890 err_out:
2891 return err;
2892 }
2893 #endif
2894
2895 #ifdef CONFIG_PCI
2896 #ifndef CONFIG_SPARC
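/* On non-sparc hosts a Quattro (QFE) card is recognized purely by its
 * topology: four HME functions sitting behind a DEC 21153 PCI-PCI bridge.
 */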
2897 static int is_quattro_p(struct pci_dev *pdev)
2898 {
2899 struct pci_dev *busdev = pdev->bus->self;
2900 struct pci_dev *this_pdev;
2901 int n_hmes;
2902
2903 if (busdev == NULL ||
2904 busdev->vendor != PCI_VENDOR_ID_DEC ||
2905 busdev->device != PCI_DEVICE_ID_DEC_21153)
2906 return 0;
2907
2908 n_hmes = 0;
2909 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2910 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2911 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2912 n_hmes++;
2913 }
2914
2915 if (n_hmes != 4)
2916 return 0;
2917
2918 return 1;
2919 }
2920
2921 /* Fetch MAC address from vital product data of PCI ROM. */
2922 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2923 {
2924 int this_offset;
2925
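/* Scan the ROM for what looks like a Sun VPD entry: a 0x90 resource
 * header followed by the ASCII keyword "NA" (0x4e 0x41, network address)
 * with a 6-byte payload holding the MAC. "index" selects which of the
 * addresses to take on a multi-port card. (Byte pattern taken from the
 * match below; the VPD layout itself is an assumption.)
 */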
2926 for (this_offset = 0x20; this_offset < len; this_offset++) {
2927 void __iomem *p = rom_base + this_offset;
2928
2929 if (readb(p + 0) != 0x90 ||
2930 readb(p + 1) != 0x00 ||
2931 readb(p + 2) != 0x09 ||
2932 readb(p + 3) != 0x4e ||
2933 readb(p + 4) != 0x41 ||
2934 readb(p + 5) != 0x06)
2935 continue;
2936
2937 this_offset += 6;
2938 p += 6;
2939
2940 if (index == 0) {
2941 int i;
2942
2943 for (i = 0; i < 6; i++)
2944 dev_addr[i] = readb(p + i);
2945 return 1;
2946 }
2947 index--;
2948 }
2949 return 0;
2950 }
2951
2952 static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2953 {
2954 size_t size;
2955 void __iomem *p = pci_map_rom(pdev, &size);
2956
2957 if (p) {
2958 int index = 0;
2959 int found;
2960
2961 if (is_quattro_p(pdev))
2962 index = PCI_SLOT(pdev->devfn);
2963
2964 found = readb(p) == 0x55 &&
2965 readb(p + 1) == 0xaa &&
2966 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2967 pci_unmap_rom(pdev, p);
2968 if (found)
2969 return;
2970 }
2971
2972 /* Sun MAC prefix then 3 random bytes. */
2973 dev_addr[0] = 0x08;
2974 dev_addr[1] = 0x00;
2975 dev_addr[2] = 0x20;
2976 get_random_bytes(&dev_addr[3], 3);
2977 }
2978 #endif /* !(CONFIG_SPARC) */
2979
2980 static int happy_meal_pci_probe(struct pci_dev *pdev,
2981 const struct pci_device_id *ent)
2982 {
2983 struct quattro *qp = NULL;
2984 #ifdef CONFIG_SPARC
2985 struct device_node *dp;
2986 #endif
2987 struct happy_meal *hp;
2988 struct net_device *dev;
2989 void __iomem *hpreg_base;
2990 unsigned long hpreg_res;
2991 int i, qfe_slot = -1;
2992 char prom_name[64];
2993 int err;
2994
2995 /* Now make sure pci_dev cookie is there. */
2996 #ifdef CONFIG_SPARC
2997 dp = pci_device_to_OF_node(pdev);
2998 strcpy(prom_name, dp->name);
2999 #else
3000 if (is_quattro_p(pdev))
3001 strcpy(prom_name, "SUNW,qfe");
3002 else
3003 strcpy(prom_name, "SUNW,hme");
3004 #endif
3005
3006 err = -ENODEV;
3007
3008 if (pci_enable_device(pdev))
3009 goto err_out;
3010 pci_set_master(pdev);
3011
3012 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3013 qp = quattro_pci_find(pdev);
3014 if (qp == NULL)
3015 goto err_out;
3016 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
3017 if (qp->happy_meals[qfe_slot] == NULL)
3018 break;
3019 if (qfe_slot == 4)
3020 goto err_out;
3021 }
3022
3023 dev = alloc_etherdev(sizeof(struct happy_meal));
3024 err = -ENOMEM;
3025 if (!dev)
3026 goto err_out;
3027 SET_NETDEV_DEV(dev, &pdev->dev);
3028
3029 if (hme_version_printed++ == 0)
3030 printk(KERN_INFO "%s", version);
3031
3032 hp = netdev_priv(dev);
3033
3034 hp->happy_dev = pdev;
3035 hp->dma_dev = &pdev->dev;
3036
3037 spin_lock_init(&hp->happy_lock);
3038
3039 if (qp != NULL) {
3040 hp->qfe_parent = qp;
3041 hp->qfe_ent = qfe_slot;
3042 qp->happy_meals[qfe_slot] = dev;
3043 }
3044
3045 hpreg_res = pci_resource_start(pdev, 0);
3046 err = -ENODEV;
3047 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3048 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3049 goto err_out_clear_quattro;
3050 }
3051 if (pci_request_regions(pdev, DRV_NAME)) {
3052 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3053 "aborting.\n");
3054 goto err_out_clear_quattro;
3055 }
3056
3057 if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
3058 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
3059 goto err_out_free_res;
3060 }
3061
3062 for (i = 0; i < 6; i++) {
3063 if (macaddr[i] != 0)
3064 break;
3065 }
3066 if (i < 6) { /* a mac address was given */
3067 for (i = 0; i < 6; i++)
3068 dev->dev_addr[i] = macaddr[i];
3069 macaddr[5]++;
3070 } else {
3071 #ifdef CONFIG_SPARC
3072 const unsigned char *addr;
3073 int len;
3074
3075 if (qfe_slot != -1 &&
3076 (addr = of_get_property(dp, "local-mac-address", &len))
3077 != NULL &&
3078 len == 6) {
3079 memcpy(dev->dev_addr, addr, ETH_ALEN);
3080 } else {
3081 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
3082 }
3083 #else
3084 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
3085 #endif
3086 }
3087
3088 /* Layout registers. */
3089 hp->gregs = (hpreg_base + 0x0000UL);
3090 hp->etxregs = (hpreg_base + 0x2000UL);
3091 hp->erxregs = (hpreg_base + 0x4000UL);
3092 hp->bigmacregs = (hpreg_base + 0x6000UL);
3093 hp->tcvregs = (hpreg_base + 0x7000UL);
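/* On the PCI (CheerIO) variant all five register blocks live in one 32K
 * BAR at the fixed offsets above, unlike SBUS where each block is a
 * separate OF resource mapped individually.
 */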
3094
3095 #ifdef CONFIG_SPARC
3096 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3097 if (hp->hm_revision == 0xff)
3098 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3099 #else
3100 /* This revision value works on non-sparc hosts. */
3101 hp->hm_revision = 0x20;
3102 #endif
3103
3104 /* Now enable the feature flags we can. */
3105 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3106 hp->happy_flags = HFLAG_20_21;
3107 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3108 hp->happy_flags = HFLAG_NOT_A0;
3109
3110 if (qp != NULL)
3111 hp->happy_flags |= HFLAG_QUATTRO;
3112
3113 /* And of course, indicate this is PCI. */
3114 hp->happy_flags |= HFLAG_PCI;
3115
3116 #ifdef CONFIG_SPARC
3117 /* Assume PCI happy meals can handle all burst sizes. */
3118 hp->happy_bursts = DMA_BURSTBITS;
3119 #endif
3120
3121 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3122 &hp->hblock_dvma, GFP_KERNEL);
3123 err = -ENOMEM;
3124 if (!hp->happy_block)
3125 goto err_out_iounmap;
3126
3127 hp->linkcheck = 0;
3128 hp->timer_state = asleep;
3129 hp->timer_ticks = 0;
3130
3131 init_timer(&hp->happy_timer);
3132
3133 hp->irq = pdev->irq;
3134 hp->dev = dev;
3135 dev->netdev_ops = &hme_netdev_ops;
3136 dev->watchdog_timeo = 5*HZ;
3137 dev->ethtool_ops = &hme_ethtool_ops;
3138
3139 /* Happy Meal can do it all... */
3140 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3141 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3142
3143 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3144 /* Hook up PCI register/descriptor accessors. */
3145 hp->read_desc32 = pci_hme_read_desc32;
3146 hp->write_txd = pci_hme_write_txd;
3147 hp->write_rxd = pci_hme_write_rxd;
3148 hp->read32 = pci_hme_read32;
3149 hp->write32 = pci_hme_write32;
3150 #endif
3151
3152 /* Grrr, Happy Meal comes up by default not advertising
3153 * full duplex 100baseT capabilities, fix this.
3154 */
3155 spin_lock_irq(&hp->happy_lock);
3156 happy_meal_set_initial_advertisement(hp);
3157 spin_unlock_irq(&hp->happy_lock);
3158
3159 err = register_netdev(hp->dev);
3160 if (err) {
3161 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3162 "aborting.\n");
3163 goto err_out_iounmap;
3164 }
3165
3166 pci_set_drvdata(pdev, hp);
3167
3168 if (!qfe_slot) {
3169 struct pci_dev *qpdev = qp->quattro_dev;
3170
3171 prom_name[0] = 0;
3172 if (!strncmp(dev->name, "eth", 3)) {
3173 int i = simple_strtoul(dev->name + 3, NULL, 10);
3174 sprintf(prom_name, "-%d", i + 3);
3175 }
3176 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3177 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3178 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3179 printk("DEC 21153 PCI Bridge\n");
3180 else
3181 printk("unknown bridge %04x.%04x\n",
3182 qpdev->vendor, qpdev->device);
3183 }
3184
3185 if (qfe_slot != -1)
3186 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3187 dev->name, qfe_slot);
3188 else
3189 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3190 dev->name);
3191
3192 printk("%pM\n", dev->dev_addr);
3193
3194 return 0;
3195
3196 err_out_iounmap:
3197 iounmap(hp->gregs);
3198
3199 err_out_free_res:
3200 pci_release_regions(pdev);
3201
3202 err_out_clear_quattro:
3203 if (qp != NULL)
3204 qp->happy_meals[qfe_slot] = NULL;
3205
3206 free_netdev(dev);
3207
3208 err_out:
3209 return err;
3210 }
3211
3212 static void happy_meal_pci_remove(struct pci_dev *pdev)
3213 {
3214 struct happy_meal *hp = pci_get_drvdata(pdev);
3215 struct net_device *net_dev = hp->dev;
3216
3217 unregister_netdev(net_dev);
3218
3219 dma_free_coherent(hp->dma_dev, PAGE_SIZE,
3220 hp->happy_block, hp->hblock_dvma);
3221 iounmap(hp->gregs);
3222 pci_release_regions(hp->happy_dev);
3223
3224 free_netdev(net_dev);
3225 }
3226
3227 static const struct pci_device_id happymeal_pci_ids[] = {
3228 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3229 { } /* Terminating entry */
3230 };
3231
3232 MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3233
3234 static struct pci_driver hme_pci_driver = {
3235 .name = "hme",
3236 .id_table = happymeal_pci_ids,
3237 .probe = happy_meal_pci_probe,
3238 .remove = happy_meal_pci_remove,
3239 };
3240
3241 static int __init happy_meal_pci_init(void)
3242 {
3243 return pci_register_driver(&hme_pci_driver);
3244 }
3245
3246 static void happy_meal_pci_exit(void)
3247 {
3248 pci_unregister_driver(&hme_pci_driver);
3249
3250 while (qfe_pci_list) {
3251 struct quattro *qfe = qfe_pci_list;
3252 struct quattro *next = qfe->next;
3253
3254 kfree(qfe);
3255
3256 qfe_pci_list = next;
3257 }
3258 }
3259
3260 #endif
3261
3262 #ifdef CONFIG_SBUS
3263 static const struct of_device_id hme_sbus_match[];
3264 static int hme_sbus_probe(struct platform_device *op)
3265 {
3266 const struct of_device_id *match;
3267 struct device_node *dp = op->dev.of_node;
3268 const char *model = of_get_property(dp, "model", NULL);
3269 int is_qfe;
3270
3271 match = of_match_device(hme_sbus_match, &op->dev);
3272 if (!match)
3273 return -EINVAL;
3274 is_qfe = (match->data != NULL);
3275
3276 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3277 is_qfe = 1;
3278
3279 return happy_meal_sbus_probe_one(op, is_qfe);
3280 }
3281
3282 static int hme_sbus_remove(struct platform_device *op)
3283 {
3284 struct happy_meal *hp = platform_get_drvdata(op);
3285 struct net_device *net_dev = hp->dev;
3286
3287 unregister_netdev(net_dev);
3288
3289 /* XXX qfe parent interrupt... */
3290
3291 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3292 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3293 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3294 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3295 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3296 dma_free_coherent(hp->dma_dev,
3297 PAGE_SIZE,
3298 hp->happy_block,
3299 hp->hblock_dvma);
3300
3301 free_netdev(net_dev);
3302
3303 return 0;
3304 }
3305
3306 static const struct of_device_id hme_sbus_match[] = {
3307 {
3308 .name = "SUNW,hme",
3309 },
3310 {
3311 .name = "SUNW,qfe",
3312 .data = (void *) 1,
3313 },
3314 {
3315 .name = "qfe",
3316 .data = (void *) 1,
3317 },
3318 {},
3319 };
3320
3321 MODULE_DEVICE_TABLE(of, hme_sbus_match);
3322
3323 static struct platform_driver hme_sbus_driver = {
3324 .driver = {
3325 .name = "hme",
3326 .of_match_table = hme_sbus_match,
3327 },
3328 .probe = hme_sbus_probe,
3329 .remove = hme_sbus_remove,
3330 };
3331
3332 static int __init happy_meal_sbus_init(void)
3333 {
3334 int err;
3335
3336 err = platform_driver_register(&hme_sbus_driver);
3337 if (!err)
3338 err = quattro_sbus_register_irqs();
3339
3340 return err;
3341 }
3342
3343 static void happy_meal_sbus_exit(void)
3344 {
3345 platform_driver_unregister(&hme_sbus_driver);
3346 quattro_sbus_free_irqs();
3347
3348 while (qfe_sbus_list) {
3349 struct quattro *qfe = qfe_sbus_list;
3350 struct quattro *next = qfe->next;
3351
3352 kfree(qfe);
3353
3354 qfe_sbus_list = next;
3355 }
3356 }
3357 #endif
3358
3359 static int __init happy_meal_probe(void)
3360 {
3361 int err = 0;
3362
3363 #ifdef CONFIG_SBUS
3364 err = happy_meal_sbus_init();
3365 #endif
3366 #ifdef CONFIG_PCI
3367 if (!err) {
3368 err = happy_meal_pci_init();
3369 #ifdef CONFIG_SBUS
3370 if (err)
3371 happy_meal_sbus_exit();
3372 #endif
3373 }
3374 #endif
3375
3376 return err;
3377 }
3378
3379
3380 static void __exit happy_meal_exit(void)
3381 {
3382 #ifdef CONFIG_SBUS
3383 happy_meal_sbus_exit();
3384 #endif
3385 #ifdef CONFIG_PCI
3386 happy_meal_pci_exit();
3387 #endif
3388 }
3389
3390 module_init(happy_meal_probe);
3391 module_exit(happy_meal_exit);
3392