1 /*
2 * File Name:
3 * defxx.c
4 *
5 * Copyright Information:
6 * Copyright Digital Equipment Corporation 1996.
7 *
8 * This software may be used and distributed according to the terms of
9 * the GNU General Public License, incorporated herein by reference.
10 *
11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI TURBOchannel, EISA and PCI controller families. Supported
14 * adapters include:
15 *
16 * DEC FDDIcontroller/TURBOchannel (DEFTA)
17 * DEC FDDIcontroller/EISA (DEFEA)
18 * DEC FDDIcontroller/PCI (DEFPA)
19 *
20 * The original author:
21 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
22 *
23 * Maintainers:
24 * macro Maciej W. Rozycki <macro@linux-mips.org>
25 *
26 * Credits:
27 * I'd like to thank Patricia Cross for helping me get started with
28 * Linux, David Davies for a lot of help upgrading and configuring
29 * my development system and for answering many OS and driver
30 * development questions, and Alan Cox for recommendations and
31 * integration help on getting FDDI support into Linux. LVS
32 *
33 * Driver Architecture:
34 * The driver architecture is largely based on previous driver work
35 * for other operating systems. The upper edge interface and
36 * functions were largely taken from existing Linux device drivers
37 * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38 * driver.
39 *
40 * Adapter Probe -
41 * The driver scans for supported EISA adapters by reading the
42 * SLOT ID register for each EISA slot and making a match
43 * against the expected value.
44 *
45 * Bus-Specific Initialization -
46 * This driver currently supports both EISA and PCI controller
 47 *		families.  While the custom DMA chip and FDDI logic are similar
 48 *		or identical, the bus logic is very different.  After
 49 *		initialization, the only bus-specific difference is in how the
50 * driver enables and disables interrupts. Other than that, the
51 * run-time critical code behaves the same on both families.
52 * It's important to note that both adapter families are configured
53 * to I/O map, rather than memory map, the adapter registers.
54 *
55 * Driver Open/Close -
56 * In the driver open routine, the driver ISR (interrupt service
57 * routine) is registered and the adapter is brought to an
58 * operational state. In the driver close routine, the opposite
59 * occurs; the driver ISR is deregistered and the adapter is
60 * brought to a safe, but closed state. Users may use consecutive
61 * commands to bring the adapter up and down as in the following
62 * example:
63 * ifconfig fddi0 up
64 * ifconfig fddi0 down
65 * ifconfig fddi0 up
66 *
67 * Driver Shutdown -
68 * Apparently, there is no shutdown or halt routine support under
69 * Linux. This routine would be called during "reboot" or
70 * "shutdown" to allow the driver to place the adapter in a safe
71 * state before a warm reboot occurs. To be really safe, the user
 72 *		should close the adapter before shutdown (e.g. ifconfig fddi0 down)
73 * to ensure that the adapter DMA engine is taken off-line. However,
74 * the current driver code anticipates this problem and always issues
75 * a soft reset of the adapter at the beginning of driver initialization.
76 * A future driver enhancement in this area may occur in 2.1.X where
77 * Alan indicated that a shutdown handler may be implemented.
78 *
79 * Interrupt Service Routine -
80 * The driver supports shared interrupts, so the ISR is registered for
81 * each board with the appropriate flag and the pointer to that board's
82 * device structure. This provides the context during interrupt
83 * processing to support shared interrupts and multiple boards.
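 *
 *		For example, the open path registers the handler along these
 *		lines (a sketch of what dfx_open() below actually does):
 *
 *			request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *				    dev->name, dev);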
84 *
85 * Interrupt enabling/disabling can occur at many levels. At the host
86 * end, you can disable system interrupts, or disable interrupts at the
87 * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
88 * have a bus-logic chip interrupt enable/disable as well as a DMA
89 * controller interrupt enable/disable.
90 *
91 * The driver currently enables and disables adapter interrupts at the
92 * bus-logic chip and assumes that Linux will take care of clearing or
93 * acknowledging any host-based interrupt chips.
94 *
95 * Control Functions -
96 * Control functions are those used to support functions such as adding
97 * or deleting multicast addresses, enabling or disabling packet
98 * reception filters, or other custom/proprietary commands. Presently,
99 * the driver supports the "get statistics", "set multicast list", and
 100 *		"set mac address" functions defined by Linux.  Possible
 101 *		enhancements include:
102 *
103 * - Custom ioctl interface for executing port interface commands
104 * - Custom ioctl interface for adding unicast addresses to
105 * adapter CAM (to support bridge functions).
106 * - Custom ioctl interface for supporting firmware upgrades.
107 *
108 * Hardware (port interface) Support Routines -
109 * The driver function names that start with "dfx_hw_" represent
110 * low-level port interface routines that are called frequently. They
111 * include issuing a DMA or port control command to the adapter,
112 * resetting the adapter, or reading the adapter state. Since the
113 * driver initialization and run-time code must make calls into the
114 * port interface, these routines were written to be as generic and
115 * usable as possible.
116 *
117 * Receive Path -
118 * The adapter DMA engine supports a 256 entry receive descriptor block
119 * of which up to 255 entries can be used at any given time. The
120 * architecture is a standard producer, consumer, completion model in
121 * which the driver "produces" receive buffers to the adapter, the
122 * adapter "consumes" the receive buffers by DMAing incoming packet data,
123 * and the driver "completes" the receive buffers by servicing the
124 * incoming packet, then "produces" a new buffer and starts the cycle
 125 *		again.  Receive buffers can be fragmented into up to 16 fragments
126 * (descriptor entries). For simplicity, this driver posts
127 * single-fragment receive buffers of 4608 bytes, then allocates a
128 * sk_buff, copies the data, then reposts the buffer. To reduce CPU
129 * utilization, a better approach would be to pass up the receive
130 * buffer (no extra copy) then allocate and post a replacement buffer.
131 * This is a performance enhancement that should be looked into at
132 * some point.
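 *
 *		A minimal sketch of the copy-and-repost completion step
 *		(illustrative only; see dfx_rcv_queue_process() later in this
 *		file for the real code):
 *
 *			skb = netdev_alloc_skb(dev, pkt_len + 3);
 *			skb_reserve(skb, 3);	   - keep data longword aligned
 *			skb_copy_to_linear_data(skb, rcv_buf, pkt_len);
 *			skb->protocol = fddi_type_trans(skb, dev);
 *			netif_rx(skb);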
133 *
134 * Transmit Path -
135 * Like the receive path, the adapter DMA engine supports a 256 entry
136 * transmit descriptor block of which up to 255 entries can be used at
 137 *		any given time.  Transmit buffers can be fragmented into up to 255
138 * fragments (descriptor entries). This driver always posts one
139 * fragment per transmit packet request.
140 *
141 * The fragment contains the entire packet from FC to end of data.
142 * Before posting the buffer to the adapter, the driver sets a three-byte
143 * packet request header (PRH) which is required by the Motorola MAC chip
144 * used on the adapters. The PRH tells the MAC the type of token to
145 * receive/send, whether or not to generate and append the CRC, whether
146 * synchronous or asynchronous framing is used, etc. Since the PRH
147 * definition is not necessarily consistent across all FDDI chipsets,
148 * the driver, rather than the common FDDI packet handler routines,
149 * sets these bytes.
150 *
 151 *		To reduce the number of descriptor fetches needed per transmit request,
152 * the driver takes advantage of the fact that there are at least three
153 * bytes available before the skb->data field on the outgoing transmit
154 * request. This is guaranteed by having fddi_setup() in net_init.c set
155 * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
156 * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
157 * bytes which we'll use to store the PRH.
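 *
 *		A sketch of how the transmit path fills in those three bytes
 *		(the PRH byte values are the DFX_PRH*_BYTE definitions from
 *		defxx.h; see dfx_xmt_queue_pkt() for the real code):
 *
 *			skb_push(skb, 3);
 *			skb->data[0] = DFX_PRH0_BYTE;
 *			skb->data[1] = DFX_PRH1_BYTE;
 *			skb->data[2] = DFX_PRH2_BYTE;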
158 *
 159 *		There's a subtle advantage to adding these pad bytes to the
 160 *		hard_header_len: it ensures that the data portion of the packet for
161 * an 802.2 SNAP frame is longword aligned. Other FDDI driver
162 * implementations may not need the extra padding and can start copying
163 * or DMAing directly from the FC byte which starts at skb->data. Should
164 * another driver implementation need ADDITIONAL padding, the net_init.c
165 * module should be updated and dev->hard_header_len should be increased.
166 * NOTE: To maintain the alignment on the data portion of the packet,
167 * dev->hard_header_len should always be evenly divisible by 4 and at
168 * least 24 bytes in size.
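 *
 *		The 24-byte figure breaks down as 1 byte FC + 6 bytes DA +
 *		6 bytes SA + 8 bytes of 802.2 LLC/SNAP header = 21 bytes of
 *		frame header, plus the 3 pad/PRH bytes = 24, which is evenly
 *		divisible by 4 and so keeps the SNAP payload longword aligned.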
169 *
170 * Modification History:
171 * Date Name Description
172 * 16-Aug-96 LVS Created.
173 * 20-Aug-96 LVS Updated dfx_probe so that version information
174 * string is only displayed if 1 or more cards are
175 * found. Changed dfx_rcv_queue_process to copy
176 * 3 NULL bytes before FC to ensure that data is
177 * longword aligned in receive buffer.
178 * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
179 * LLC group promiscuous mode if multicast list
180 * is too large. LLC individual/group promiscuous
181 * mode is now disabled if IFF_PROMISC flag not set.
182 * dfx_xmt_queue_pkt no longer checks for NULL skb
183 * on Alan Cox recommendation. Added node address
184 * override support.
185 * 12-Sep-96 LVS Reset current address to factory address during
186 * device open. Updated transmit path to post a
187 * single fragment which includes PRH->end of data.
188 * Mar 2000 AC Did various cleanups for 2.3.x
189 * Jun 2000 jgarzik PCI and resource alloc cleanups
190 * Jul 2000 tjeerd Much cleanup and some bug fixes
191 * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
192 * Feb 2001 Skb allocation fixes
193 * Feb 2001 davej PCI enable cleanups.
194 * 04 Aug 2003 macro Converted to the DMA API.
195 * 14 Aug 2004 macro Fix device names reported.
196 * 14 Jun 2005 macro Use irqreturn_t.
197 * 23 Oct 2006 macro Big-endian host support.
198 * 14 Dec 2006 macro TURBOchannel support.
199 * 01 Jul 2014 macro Fixes for DMA on 64-bit hosts.
200 */
201
202 /* Include files */
203 #include <linux/bitops.h>
204 #include <linux/compiler.h>
205 #include <linux/delay.h>
206 #include <linux/dma-mapping.h>
207 #include <linux/eisa.h>
208 #include <linux/errno.h>
209 #include <linux/fddidevice.h>
210 #include <linux/interrupt.h>
211 #include <linux/ioport.h>
212 #include <linux/kernel.h>
213 #include <linux/module.h>
214 #include <linux/netdevice.h>
215 #include <linux/pci.h>
216 #include <linux/skbuff.h>
217 #include <linux/slab.h>
218 #include <linux/string.h>
219 #include <linux/tc.h>
220
221 #include <asm/byteorder.h>
222 #include <asm/io.h>
223
224 #include "defxx.h"
225
226 /* Version information string should be updated prior to each new release! */
227 #define DRV_NAME "defxx"
228 #define DRV_VERSION "v1.11"
229 #define DRV_RELDATE "2014/07/01"
230
231 static const char version[] =
232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 " Lawrence V. Stefani and others\n";
234
235 #define DYNAMIC_BUFFERS 1
236
237 #define SKBUFF_RX_COPYBREAK 200
238 /*
239 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
240 * alignment for compatibility with old EISA boards.
241 */
242 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
243
244 #ifdef CONFIG_EISA
245 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
246 #else
247 #define DFX_BUS_EISA(dev) 0
248 #endif
249
250 #ifdef CONFIG_TC
251 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
252 #else
253 #define DFX_BUS_TC(dev) 0
254 #endif
255
256 #ifdef CONFIG_DEFXX_MMIO
257 #define DFX_MMIO 1
258 #else
259 #define DFX_MMIO 0
260 #endif
261
262 /* Define module-wide (static) routines */
263
264 static void dfx_bus_init(struct net_device *dev);
265 static void dfx_bus_uninit(struct net_device *dev);
266 static void dfx_bus_config_check(DFX_board_t *bp);
267
268 static int dfx_driver_init(struct net_device *dev,
269 const char *print_name,
270 resource_size_t bar_start);
271 static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
272
273 static int dfx_open(struct net_device *dev);
274 static int dfx_close(struct net_device *dev);
275
276 static void dfx_int_pr_halt_id(DFX_board_t *bp);
277 static void dfx_int_type_0_process(DFX_board_t *bp);
278 static void dfx_int_common(struct net_device *dev);
279 static irqreturn_t dfx_interrupt(int irq, void *dev_id);
280
281 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
282 static void dfx_ctl_set_multicast_list(struct net_device *dev);
283 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
284 static int dfx_ctl_update_cam(DFX_board_t *bp);
285 static int dfx_ctl_update_filters(DFX_board_t *bp);
286
287 static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
288 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
289 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
290 static int dfx_hw_adap_state_rd(DFX_board_t *bp);
291 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
292
293 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294 static void dfx_rcv_queue_process(DFX_board_t *bp);
295 #ifdef DYNAMIC_BUFFERS
296 static void dfx_rcv_flush(DFX_board_t *bp);
297 #else
298 static inline void dfx_rcv_flush(DFX_board_t *bp) {}
299 #endif
300
301 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
302 struct net_device *dev);
303 static int dfx_xmt_done(DFX_board_t *bp);
304 static void dfx_xmt_flush(DFX_board_t *bp);
305
306 /* Define module-wide (static) variables */
307
308 static struct pci_driver dfx_pci_driver;
309 static struct eisa_driver dfx_eisa_driver;
310 static struct tc_driver dfx_tc_driver;
311
312
313 /*
314 * =======================
315 * = dfx_port_write_long =
316 * = dfx_port_read_long =
317 * =======================
318 *
319 * Overview:
320 * Routines for reading and writing values from/to adapter
321 *
322 * Returns:
323 * None
324 *
325 * Arguments:
326 * bp - pointer to board information
327 * offset - register offset from base I/O address
328 * data - for dfx_port_write_long, this is a value to write;
329 * for dfx_port_read_long, this is a pointer to store
330 * the read value
331 *
332 * Functional Description:
333 * These routines perform the correct operation to read or write
334 * the adapter register.
335 *
336 * EISA port block base addresses are based on the slot number in which the
337 * controller is installed. For example, if the EISA controller is installed
338 * in slot 4, the port block base address is 0x4000. If the controller is
339 * installed in slot 2, the port block base address is 0x2000, and so on.
340 * This port block can be used to access PDQ, ESIC, and DEFEA on-board
341 * registers using the register offsets defined in DEFXX.H.
342 *
343 * PCI port block base addresses are assigned by the PCI BIOS or system
344 * firmware. There is one 128 byte port block which can be accessed. It
345 * allows for I/O mapping of both PDQ and PFI registers using the register
346 * offsets defined in DEFXX.H.
347 *
348 * Return Codes:
349 * None
350 *
351 * Assumptions:
352 * bp->base is a valid base I/O address for this adapter.
353 * offset is a valid register offset for this adapter.
354 *
355 * Side Effects:
356 * Rather than produce macros for these functions, these routines
357 * are defined using "inline" to ensure that the compiler will
358 * generate inline code and not waste a procedure call and return.
359 * This provides all the benefits of macros, but with the
360 * advantage of strict data type checking.
361 */
362
363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
364 {
365 writel(data, bp->base.mem + offset);
366 mb();
367 }
368
369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
370 {
371 outl(data, bp->base.port + offset);
372 }
373
374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
375 {
376 struct device __maybe_unused *bdev = bp->bus_dev;
377 int dfx_bus_tc = DFX_BUS_TC(bdev);
378 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
379
380 if (dfx_use_mmio)
381 dfx_writel(bp, offset, data);
382 else
383 dfx_outl(bp, offset, data);
384 }
385
386
387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
388 {
389 mb();
390 *data = readl(bp->base.mem + offset);
391 }
392
393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
394 {
395 *data = inl(bp->base.port + offset);
396 }
397
398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
399 {
400 struct device __maybe_unused *bdev = bp->bus_dev;
401 int dfx_bus_tc = DFX_BUS_TC(bdev);
402 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
403
404 if (dfx_use_mmio)
405 dfx_readl(bp, offset, data);
406 else
407 dfx_inl(bp, offset, data);
408 }
409
410
411 /*
412 * ================
413 * = dfx_get_bars =
414 * ================
415 *
416 * Overview:
417 * Retrieves the address ranges used to access control and status
418 * registers.
419 *
420 * Returns:
421 * None
422 *
423 * Arguments:
424 * bdev - pointer to device information
425 * bar_start - pointer to store the start addresses
426 * bar_len - pointer to store the lengths of the areas
427 *
428 * Assumptions:
429 * I am sure there are some.
430 *
431 * Side Effects:
432 * None
433 */
434 static void dfx_get_bars(struct device *bdev,
435 resource_size_t *bar_start, resource_size_t *bar_len)
436 {
437 int dfx_bus_pci = dev_is_pci(bdev);
438 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
439 int dfx_bus_tc = DFX_BUS_TC(bdev);
440 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
441
442 if (dfx_bus_pci) {
443 int num = dfx_use_mmio ? 0 : 1;
444
445 bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
446 bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
447 bar_start[2] = bar_start[1] = 0;
448 bar_len[2] = bar_len[1] = 0;
449 }
450 if (dfx_bus_eisa) {
451 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
452 resource_size_t bar_lo;
453 resource_size_t bar_hi;
454
455 if (dfx_use_mmio) {
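			/*
			 * The MMIO decode range is programmed into byte-wide
			 * ESIC address comparator registers holding bits 31:8
			 * of the low and high bounds, so assemble the 32-bit
			 * values one byte at a time.
			 */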
456 bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
457 bar_lo <<= 8;
458 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
459 bar_lo <<= 8;
460 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
461 bar_lo <<= 8;
462 bar_start[0] = bar_lo;
463 bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
464 bar_hi <<= 8;
465 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
466 bar_hi <<= 8;
467 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
468 bar_hi <<= 8;
469 bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
470 1;
471 } else {
472 bar_start[0] = base_addr;
473 bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
474 }
475 bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
476 bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
477 bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
478 bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
479 }
480 if (dfx_bus_tc) {
481 bar_start[0] = to_tc_dev(bdev)->resource.start +
482 PI_TC_K_CSR_OFFSET;
483 bar_len[0] = PI_TC_K_CSR_LEN;
484 bar_start[2] = bar_start[1] = 0;
485 bar_len[2] = bar_len[1] = 0;
486 }
487 }
488
489 static const struct net_device_ops dfx_netdev_ops = {
490 .ndo_open = dfx_open,
491 .ndo_stop = dfx_close,
492 .ndo_start_xmit = dfx_xmt_queue_pkt,
493 .ndo_get_stats = dfx_ctl_get_stats,
494 .ndo_set_rx_mode = dfx_ctl_set_multicast_list,
495 .ndo_set_mac_address = dfx_ctl_set_mac_address,
496 };
497
498 static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
499 bool eisa)
500 {
501 pr_err("%s: Cannot use %s, no address set, aborting\n",
502 print_name, mmio ? "MMIO" : "I/O");
503 pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
504 print_name, mmio ? 'n' : 'y');
505 if (eisa && mmio)
506 pr_err("%s: Or run ECU and set adapter's MMIO location\n",
507 print_name);
508 }
509
510 static void dfx_register_res_err(const char *print_name, bool mmio,
511 unsigned long start, unsigned long len)
512 {
513 pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
514 print_name, mmio ? "MMIO" : "I/O", len, start);
515 }
516
517 /*
518 * ================
519 * = dfx_register =
520 * ================
521 *
522 * Overview:
523 * Initializes a supported FDDI controller
524 *
525 * Returns:
526 * Condition code
527 *
528 * Arguments:
529 * bdev - pointer to device information
530 *
531 * Functional Description:
532 *
533 * Return Codes:
534 * 0 - This device (fddi0, fddi1, etc) configured successfully
535 *   negative errno	- Failed to enable the device, reserve or map resources, or dfx_driver_init failed.
536 *
537 * Assumptions:
538 * It compiles so it should work :-( (PCI cards do :-)
539 *
540 * Side Effects:
541 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
542 * initialized and the board resources are read and stored in
543 * the device structure.
544 */
545 static int dfx_register(struct device *bdev)
546 {
547 static int version_disp;
548 int dfx_bus_pci = dev_is_pci(bdev);
549 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
550 int dfx_bus_tc = DFX_BUS_TC(bdev);
551 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
552 const char *print_name = dev_name(bdev);
553 struct net_device *dev;
554 DFX_board_t *bp; /* board pointer */
555 resource_size_t bar_start[3] = {0}; /* pointers to ports */
556 resource_size_t bar_len[3] = {0}; /* resource length */
557 int alloc_size; /* total buffer size used */
558 struct resource *region;
559 int err = 0;
560
561 if (!version_disp) { /* display version info if adapter is found */
562 version_disp = 1; /* set display flag to TRUE so that */
563 printk(version); /* we only display this string ONCE */
564 }
565
566 dev = alloc_fddidev(sizeof(*bp));
567 if (!dev) {
568 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
569 print_name);
570 return -ENOMEM;
571 }
572
573 /* Enable PCI device. */
574 if (dfx_bus_pci) {
575 err = pci_enable_device(to_pci_dev(bdev));
576 if (err) {
577 pr_err("%s: Cannot enable PCI device, aborting\n",
578 print_name);
579 goto err_out;
580 }
581 }
582
583 SET_NETDEV_DEV(dev, bdev);
584
585 bp = netdev_priv(dev);
586 bp->bus_dev = bdev;
587 dev_set_drvdata(bdev, dev);
588
589 dfx_get_bars(bdev, bar_start, bar_len);
590 if (bar_len[0] == 0 ||
591 (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
592 dfx_register_res_alloc_err(print_name, dfx_use_mmio,
593 dfx_bus_eisa);
594 err = -ENXIO;
595 goto err_out_disable;
596 }
597
598 if (dfx_use_mmio)
599 region = request_mem_region(bar_start[0], bar_len[0],
600 print_name);
601 else
602 region = request_region(bar_start[0], bar_len[0], print_name);
603 if (!region) {
604 dfx_register_res_err(print_name, dfx_use_mmio,
605 bar_start[0], bar_len[0]);
606 err = -EBUSY;
607 goto err_out_disable;
608 }
609 if (bar_start[1] != 0) {
610 region = request_region(bar_start[1], bar_len[1], print_name);
611 if (!region) {
612 dfx_register_res_err(print_name, 0,
613 bar_start[1], bar_len[1]);
614 err = -EBUSY;
615 goto err_out_csr_region;
616 }
617 }
618 if (bar_start[2] != 0) {
619 region = request_region(bar_start[2], bar_len[2], print_name);
620 if (!region) {
621 dfx_register_res_err(print_name, 0,
622 bar_start[2], bar_len[2]);
623 err = -EBUSY;
624 goto err_out_bh_region;
625 }
626 }
627
628 /* Set up I/O base address. */
629 if (dfx_use_mmio) {
630 bp->base.mem = ioremap(bar_start[0], bar_len[0]);
631 if (!bp->base.mem) {
632 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
633 err = -ENOMEM;
634 goto err_out_esic_region;
635 }
636 } else {
637 bp->base.port = bar_start[0];
638 dev->base_addr = bar_start[0];
639 }
640
641 /* Initialize new device structure */
642 dev->netdev_ops = &dfx_netdev_ops;
643
644 if (dfx_bus_pci)
645 pci_set_master(to_pci_dev(bdev));
646
647 if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
648 err = -ENODEV;
649 goto err_out_unmap;
650 }
651
652 err = register_netdev(dev);
653 if (err)
654 goto err_out_kfree;
655
656 printk("%s: registered as %s\n", print_name, dev->name);
657 return 0;
658
659 err_out_kfree:
660 alloc_size = sizeof(PI_DESCR_BLOCK) +
661 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
662 #ifndef DYNAMIC_BUFFERS
663 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
664 #endif
665 sizeof(PI_CONSUMER_BLOCK) +
666 (PI_ALIGN_K_DESC_BLK - 1);
667 if (bp->kmalloced)
668 dma_free_coherent(bdev, alloc_size,
669 bp->kmalloced, bp->kmalloced_dma);
670
671 err_out_unmap:
672 if (dfx_use_mmio)
673 iounmap(bp->base.mem);
674
675 err_out_esic_region:
676 if (bar_start[2] != 0)
677 release_region(bar_start[2], bar_len[2]);
678
679 err_out_bh_region:
680 if (bar_start[1] != 0)
681 release_region(bar_start[1], bar_len[1]);
682
683 err_out_csr_region:
684 if (dfx_use_mmio)
685 release_mem_region(bar_start[0], bar_len[0]);
686 else
687 release_region(bar_start[0], bar_len[0]);
688
689 err_out_disable:
690 if (dfx_bus_pci)
691 pci_disable_device(to_pci_dev(bdev));
692
693 err_out:
694 free_netdev(dev);
695 return err;
696 }
697
698
699 /*
700 * ================
701 * = dfx_bus_init =
702 * ================
703 *
704 * Overview:
705 * Initializes the bus-specific controller logic.
706 *
707 * Returns:
708 * None
709 *
710 * Arguments:
711 * dev - pointer to device information
712 *
713 * Functional Description:
714 * Determine and save adapter IRQ in device table,
715 * then perform bus-specific logic initialization.
716 *
717 * Return Codes:
718 * None
719 *
720 * Assumptions:
721 * bp->base has already been set with the proper
722 * base I/O address for this device.
723 *
724 * Side Effects:
725 * Interrupts are enabled at the adapter bus-specific logic.
726 * Note: Interrupts at the DMA engine (PDQ chip) are not
727 * enabled yet.
728 */
729
730 static void dfx_bus_init(struct net_device *dev)
731 {
732 DFX_board_t *bp = netdev_priv(dev);
733 struct device *bdev = bp->bus_dev;
734 int dfx_bus_pci = dev_is_pci(bdev);
735 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
736 int dfx_bus_tc = DFX_BUS_TC(bdev);
737 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
738 u8 val;
739
740 DBG_printk("In dfx_bus_init...\n");
741
742 /* Initialize a pointer back to the net_device struct */
743 bp->dev = dev;
744
745 /* Initialize adapter based on bus type */
746
747 if (dfx_bus_tc)
748 dev->irq = to_tc_dev(bdev)->interrupt;
749 if (dfx_bus_eisa) {
750 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
751
752 /* Disable the board before fiddling with the decoders. */
753 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
754
755 /* Get the interrupt level from the ESIC chip. */
756 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
757 val &= PI_CONFIG_STAT_0_M_IRQ;
758 val >>= PI_CONFIG_STAT_0_V_IRQ;
759
760 switch (val) {
761 case PI_CONFIG_STAT_0_IRQ_K_9:
762 dev->irq = 9;
763 break;
764
765 case PI_CONFIG_STAT_0_IRQ_K_10:
766 dev->irq = 10;
767 break;
768
769 case PI_CONFIG_STAT_0_IRQ_K_11:
770 dev->irq = 11;
771 break;
772
773 case PI_CONFIG_STAT_0_IRQ_K_15:
774 dev->irq = 15;
775 break;
776 }
777
778 /*
779 * Enable memory decoding (MEMCS1) and/or port decoding
780 * (IOCS1/IOCS0) as appropriate in Function Control
781 * Register. MEMCS1 or IOCS0 is used for PDQ registers,
782 * taking 16 32-bit words, while IOCS1 is used for the
783 * Burst Holdoff register, taking a single 32-bit word
784 * only. We use the slot-specific I/O range as per the
785 		 * ESIC spec, that is, set bits 15:12 in the mask registers
786 * to mask them out.
787 */
788
789 /* Set the decode range of the board. */
790 val = 0;
791 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
792 val = PI_DEFEA_K_CSR_IO;
793 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
794
795 val = PI_IO_CMP_M_SLOT;
796 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
797 val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
798 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
799
800 val = 0;
801 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
802 val = PI_DEFEA_K_BURST_HOLDOFF;
803 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
804
805 val = PI_IO_CMP_M_SLOT;
806 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
807 val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
808 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
809
810 /* Enable the decoders. */
811 val = PI_FUNCTION_CNTRL_M_IOCS1;
812 if (dfx_use_mmio)
813 val |= PI_FUNCTION_CNTRL_M_MEMCS1;
814 else
815 val |= PI_FUNCTION_CNTRL_M_IOCS0;
816 outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
817
818 /*
819 * Enable access to the rest of the module
820 * (including PDQ and packet memory).
821 */
822 val = PI_SLOT_CNTRL_M_ENB;
823 outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
824
825 /*
826 * Map PDQ registers into memory or port space. This is
827 * done with a bit in the Burst Holdoff register.
828 */
829 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
830 if (dfx_use_mmio)
831 val |= PI_BURST_HOLDOFF_M_MEM_MAP;
832 else
833 val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
834 outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
835
836 /* Enable interrupts at EISA bus interface chip (ESIC) */
837 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
838 val |= PI_CONFIG_STAT_0_M_INT_ENB;
839 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
840 }
841 if (dfx_bus_pci) {
842 struct pci_dev *pdev = to_pci_dev(bdev);
843
844 /* Get the interrupt level from the PCI Configuration Table */
845
846 dev->irq = pdev->irq;
847
848 /* Check Latency Timer and set if less than minimal */
849
850 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
851 if (val < PFI_K_LAT_TIMER_MIN) {
852 val = PFI_K_LAT_TIMER_DEF;
853 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
854 }
855
856 /* Enable interrupts at PCI bus interface chip (PFI) */
857 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
858 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
859 }
860 }
861
862 /*
863 * ==================
864 * = dfx_bus_uninit =
865 * ==================
866 *
867 * Overview:
868 * Uninitializes the bus-specific controller logic.
869 *
870 * Returns:
871 * None
872 *
873 * Arguments:
874 * dev - pointer to device information
875 *
876 * Functional Description:
877 * Perform bus-specific logic uninitialization.
878 *
879 * Return Codes:
880 * None
881 *
882 * Assumptions:
883 * bp->base has already been set with the proper
884 * base I/O address for this device.
885 *
886 * Side Effects:
887 * Interrupts are disabled at the adapter bus-specific logic.
888 */
889
890 static void dfx_bus_uninit(struct net_device *dev)
891 {
892 DFX_board_t *bp = netdev_priv(dev);
893 struct device *bdev = bp->bus_dev;
894 int dfx_bus_pci = dev_is_pci(bdev);
895 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
896 u8 val;
897
898 DBG_printk("In dfx_bus_uninit...\n");
899
900 /* Uninitialize adapter based on bus type */
901
902 if (dfx_bus_eisa) {
903 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
904
905 /* Disable interrupts at EISA bus interface chip (ESIC) */
906 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
907 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
908 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
909
910 /* Disable the board. */
911 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
912
913 /* Disable memory and port decoders. */
914 outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
915 }
916 if (dfx_bus_pci) {
917 /* Disable interrupts at PCI bus interface chip (PFI) */
918 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
919 }
920 }
921
922
923 /*
924 * ========================
925 * = dfx_bus_config_check =
926 * ========================
927 *
928 * Overview:
929 *   Checks the configuration (burst size, full-duplex, etc.).  If any parameters
930 * are illegal, then this routine will set new defaults.
931 *
932 * Returns:
933 * None
934 *
935 * Arguments:
936 * bp - pointer to board information
937 *
938 * Functional Description:
939 * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
940 * PDQ, and all FDDI PCI controllers, all values are legal.
941 *
942 * Return Codes:
943 * None
944 *
945 * Assumptions:
946 * dfx_adap_init has NOT been called yet so burst size and other items have
947 * not been set.
948 *
949 * Side Effects:
950 * None
951 */
952
953 static void dfx_bus_config_check(DFX_board_t *bp)
954 {
955 struct device __maybe_unused *bdev = bp->bus_dev;
956 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
957 int status; /* return code from adapter port control call */
958 u32 host_data; /* LW data returned from port control call */
959
960 DBG_printk("In dfx_bus_config_check...\n");
961
962 /* Configuration check only valid for EISA adapter */
963
964 if (dfx_bus_eisa) {
965 /*
966 * First check if revision 2 EISA controller. Rev. 1 cards used
967 * PDQ revision B, so no workaround needed in this case. Rev. 3
968 * cards used PDQ revision E, so no workaround needed in this
969 * case, either. Only Rev. 2 cards used either Rev. D or E
970 * chips, so we must verify the chip revision on Rev. 2 cards.
971 */
972 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
973 /*
974 * Revision 2 FDDI EISA controller found,
975 * so let's check PDQ revision of adapter.
976 */
977 status = dfx_hw_port_ctrl_req(bp,
978 PI_PCTRL_M_SUB_CMD,
979 PI_SUB_CMD_K_PDQ_REV_GET,
980 0,
981 &host_data);
982 if ((status != DFX_K_SUCCESS) || (host_data == 2))
983 {
984 /*
985 * Either we couldn't determine the PDQ revision, or
986 * we determined that it is at revision D. In either case,
987 * we need to implement the workaround.
988 */
989
990 /* Ensure that the burst size is set to 8 longwords or less */
991
992 switch (bp->burst_size)
993 {
994 case PI_PDATA_B_DMA_BURST_SIZE_32:
995 case PI_PDATA_B_DMA_BURST_SIZE_16:
996 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
997 break;
998
999 default:
1000 break;
1001 }
1002
1003 /* Ensure that full-duplex mode is not enabled */
1004
1005 bp->full_duplex_enb = PI_SNMP_K_FALSE;
1006 }
1007 }
1008 }
1009 }
1010
1011
1012 /*
1013 * ===================
1014 * = dfx_driver_init =
1015 * ===================
1016 *
1017 * Overview:
1018 * Initializes remaining adapter board structure information
1019 * and makes sure adapter is in a safe state prior to dfx_open().
1020 *
1021 * Returns:
1022 * Condition code
1023 *
1024 * Arguments:
1025 * dev - pointer to device information
1026 * print_name - printable device name
1027 *
1028 * Functional Description:
1029 * This function allocates additional resources such as the host memory
1030 * blocks needed by the adapter (eg. descriptor and consumer blocks).
1031 * Remaining bus initialization steps are also completed. The adapter
1032 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
1033 * must call dfx_open() to open the adapter and bring it on-line.
1034 *
1035 * Return Codes:
1036 * DFX_K_SUCCESS - initialization succeeded
1037 * DFX_K_FAILURE - initialization failed - could not allocate memory
1038 * or read adapter MAC address
1039 *
1040 * Assumptions:
1041 *   Memory allocated from the dma_alloc_coherent() call is physically
1042 * contiguous, locked memory.
1043 *
1044 * Side Effects:
1045 * Adapter is reset and should be in DMA_UNAVAILABLE state before
1046 * returning from this routine.
1047 */
1048
1049 static int dfx_driver_init(struct net_device *dev, const char *print_name,
1050 resource_size_t bar_start)
1051 {
1052 DFX_board_t *bp = netdev_priv(dev);
1053 struct device *bdev = bp->bus_dev;
1054 int dfx_bus_pci = dev_is_pci(bdev);
1055 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1056 int dfx_bus_tc = DFX_BUS_TC(bdev);
1057 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
1058 int alloc_size; /* total buffer size needed */
1059 char *top_v, *curr_v; /* virtual addrs into memory block */
1060 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
1061 u32 data; /* host data register value */
1062 __le32 le32;
1063 char *board_name = NULL;
1064
1065 DBG_printk("In dfx_driver_init...\n");
1066
1067 /* Initialize bus-specific hardware registers */
1068
1069 dfx_bus_init(dev);
1070
1071 /*
1072 * Initialize default values for configurable parameters
1073 *
1074 * Note: All of these parameters are ones that a user may
1075 * want to customize. It'd be nice to break these
1076 * out into Space.c or someplace else that's more
1077 * accessible/understandable than this file.
1078 */
1079
1080 bp->full_duplex_enb = PI_SNMP_K_FALSE;
1081 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
1082 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
1083 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
1084
1085 /*
1086 * Ensure that HW configuration is OK
1087 *
1088 * Note: Depending on the hardware revision, we may need to modify
1089 * some of the configurable parameters to workaround hardware
1090 * limitations. We'll perform this configuration check AFTER
1091 * setting the parameters to their default values.
1092 */
1093
1094 dfx_bus_config_check(bp);
1095
1096 /* Disable PDQ interrupts first */
1097
1098 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1099
1100 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1101
1102 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1103
1104 /* Read the factory MAC address from the adapter then save it */
1105
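	/*
	 * The address is returned through the host data register in two
	 * port control requests: the first gives the low-order four bytes
	 * and the second the remaining two.  Each value is converted to
	 * little-endian byte order before being copied into
	 * factory_mac_addr[], so the bytes land in canonical order.
	 */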
1106 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1107 &data) != DFX_K_SUCCESS) {
1108 printk("%s: Could not read adapter factory MAC address!\n",
1109 print_name);
1110 return DFX_K_FAILURE;
1111 }
1112 le32 = cpu_to_le32(data);
1113 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1114
1115 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1116 &data) != DFX_K_SUCCESS) {
1117 printk("%s: Could not read adapter factory MAC address!\n",
1118 print_name);
1119 return DFX_K_FAILURE;
1120 }
1121 le32 = cpu_to_le32(data);
1122 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1123
1124 /*
1125 * Set current address to factory address
1126 *
1127 * Note: Node address override support is handled through
1128 * dfx_ctl_set_mac_address.
1129 */
1130
1131 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1132 if (dfx_bus_tc)
1133 board_name = "DEFTA";
1134 if (dfx_bus_eisa)
1135 board_name = "DEFEA";
1136 if (dfx_bus_pci)
1137 board_name = "DEFPA";
1138 pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1139 print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
1140 (long long)bar_start, dev->irq, dev->dev_addr);
1141
1142 /*
1143 * Get memory for descriptor block, consumer block, and other buffers
1144 * that need to be DMA read or written to by the adapter.
1145 */
1146
1147 alloc_size = sizeof(PI_DESCR_BLOCK) +
1148 PI_CMD_REQ_K_SIZE_MAX +
1149 PI_CMD_RSP_K_SIZE_MAX +
1150 #ifndef DYNAMIC_BUFFERS
1151 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1152 #endif
1153 sizeof(PI_CONSUMER_BLOCK) +
1154 (PI_ALIGN_K_DESC_BLK - 1);
1155 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1156 &bp->kmalloced_dma,
1157 GFP_ATOMIC);
1158 if (top_v == NULL)
1159 return DFX_K_FAILURE;
1160
1161 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1162
1163 /*
1164 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
1165 * plus the amount of memory needed was allocated. The physical address
1166 * is now 8K aligned. By carving up the memory in a specific order,
1167 * we'll guarantee the alignment requirements for all other structures.
1168 *
1169 * Note: If the assumptions change regarding the non-paged, non-cached,
1170 * physically contiguous nature of the memory block or the address
1171 * alignments, then we'll need to implement a different algorithm
1172 * for allocating the needed memory.
1173 */
1174
1175 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1176 curr_v = top_v + (curr_p - top_p);
1177
1178 /* Reserve space for descriptor block */
1179
1180 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1181 bp->descr_block_phys = curr_p;
1182 curr_v += sizeof(PI_DESCR_BLOCK);
1183 curr_p += sizeof(PI_DESCR_BLOCK);
1184
1185 /* Reserve space for command request buffer */
1186
1187 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1188 bp->cmd_req_phys = curr_p;
1189 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1190 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1191
1192 /* Reserve space for command response buffer */
1193
1194 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1195 bp->cmd_rsp_phys = curr_p;
1196 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1197 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1198
1199 /* Reserve space for the LLC host receive queue buffers */
1200
1201 bp->rcv_block_virt = curr_v;
1202 bp->rcv_block_phys = curr_p;
1203
1204 #ifndef DYNAMIC_BUFFERS
1205 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1206 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1207 #endif
1208
1209 /* Reserve space for the consumer block */
1210
1211 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1212 bp->cons_block_phys = curr_p;
1213
1214 /* Display virtual and physical addresses if debug driver */
1215
1216 DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1217 print_name, bp->descr_block_virt, &bp->descr_block_phys);
1218 DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1219 print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1220 DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1221 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1222 DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1223 print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1224 DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1225 print_name, bp->cons_block_virt, &bp->cons_block_phys);
1226
1227 return DFX_K_SUCCESS;
1228 }
1229
1230
1231 /*
1232 * =================
1233 * = dfx_adap_init =
1234 * =================
1235 *
1236 * Overview:
1237 * Brings the adapter to the link avail/link unavailable state.
1238 *
1239 * Returns:
1240 * Condition code
1241 *
1242 * Arguments:
1243 * bp - pointer to board information
1244 * get_buffers - non-zero if buffers to be allocated
1245 *
1246 * Functional Description:
1247 * Issues the low-level firmware/hardware calls necessary to bring
1248 * the adapter up, or to properly reset and restore adapter during
1249 * run-time.
1250 *
1251 * Return Codes:
1252 * DFX_K_SUCCESS - Adapter brought up successfully
1253 * DFX_K_FAILURE - Adapter initialization failed
1254 *
1255 * Assumptions:
1256 * bp->reset_type should be set to a valid reset type value before
1257 * calling this routine.
1258 *
1259 * Side Effects:
1260 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1261 * upon a successful return of this routine.
1262 */
1263
1264 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1265 {
1266 DBG_printk("In dfx_adap_init...\n");
1267
1268 /* Disable PDQ interrupts first */
1269
1270 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1271
1272 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1273
1274 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1275 {
1276 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1277 return DFX_K_FAILURE;
1278 }
1279
1280 /*
1281 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1282 * so we'll acknowledge all Type 0 interrupts now before continuing.
1283 */
1284
1285 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1286
1287 /*
1288 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1289 *
1290 * Note: We only need to clear host copies of these registers. The PDQ reset
1291 * takes care of the on-board register values.
1292 */
1293
1294 bp->cmd_req_reg.lword = 0;
1295 bp->cmd_rsp_reg.lword = 0;
1296 bp->rcv_xmt_reg.lword = 0;
1297
1298 /* Clear consumer block before going to DMA_AVAILABLE state */
1299
1300 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1301
1302 /* Initialize the DMA Burst Size */
1303
1304 if (dfx_hw_port_ctrl_req(bp,
1305 PI_PCTRL_M_SUB_CMD,
1306 PI_SUB_CMD_K_BURST_SIZE_SET,
1307 bp->burst_size,
1308 NULL) != DFX_K_SUCCESS)
1309 {
1310 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1311 return DFX_K_FAILURE;
1312 }
1313
1314 /*
1315 * Set base address of Consumer Block
1316 *
1317 * Assumption: 32-bit physical address of consumer block is 64 byte
1318 * aligned. That is, bits 0-5 of the address must be zero.
1319 */
1320
1321 if (dfx_hw_port_ctrl_req(bp,
1322 PI_PCTRL_M_CONS_BLOCK,
1323 bp->cons_block_phys,
1324 0,
1325 NULL) != DFX_K_SUCCESS)
1326 {
1327 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1328 return DFX_K_FAILURE;
1329 }
1330
1331 /*
1332 * Set the base address of Descriptor Block and bring adapter
1333 * to DMA_AVAILABLE state.
1334 *
1335 * Note: We also set the literal and data swapping requirements
1336 * in this command.
1337 *
1338 * Assumption: 32-bit physical address of descriptor block
1339 * is 8Kbyte aligned.
1340 */
1341 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1342 (u32)(bp->descr_block_phys |
1343 PI_PDATA_A_INIT_M_BSWAP_INIT),
1344 0, NULL) != DFX_K_SUCCESS) {
1345 printk("%s: Could not set descriptor block address!\n",
1346 bp->dev->name);
1347 return DFX_K_FAILURE;
1348 }
1349
1350 /* Set transmit flush timeout value */
1351
1352 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1353 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1354 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
1355 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1356 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1357 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1358 {
1359 printk("%s: DMA command request failed!\n", bp->dev->name);
1360 return DFX_K_FAILURE;
1361 }
1362
1363 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
1364
1365 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1366 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1367 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1368 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1369 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1370 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1371 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1372 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1373 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1374 {
1375 printk("%s: DMA command request failed!\n", bp->dev->name);
1376 return DFX_K_FAILURE;
1377 }
1378
1379 /* Initialize adapter CAM */
1380
1381 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1382 {
1383 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1384 return DFX_K_FAILURE;
1385 }
1386
1387 /* Initialize adapter filters */
1388
1389 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1390 {
1391 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1392 return DFX_K_FAILURE;
1393 }
1394
1395 /*
1396 * Remove any existing dynamic buffers (i.e. if the adapter is being
1397 * reinitialized)
1398 */
1399
1400 if (get_buffers)
1401 dfx_rcv_flush(bp);
1402
1403 /* Initialize receive descriptor block and produce buffers */
1404
1405 if (dfx_rcv_init(bp, get_buffers))
1406 {
1407 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1408 if (get_buffers)
1409 dfx_rcv_flush(bp);
1410 return DFX_K_FAILURE;
1411 }
1412
1413 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1414
1415 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1416 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1417 {
1418 printk("%s: Start command failed\n", bp->dev->name);
1419 if (get_buffers)
1420 dfx_rcv_flush(bp);
1421 return DFX_K_FAILURE;
1422 }
1423
1424 /* Initialization succeeded, reenable PDQ interrupts */
1425
1426 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1427 return DFX_K_SUCCESS;
1428 }
1429
1430
1431 /*
1432 * ============
1433 * = dfx_open =
1434 * ============
1435 *
1436 * Overview:
1437 * Opens the adapter
1438 *
1439 * Returns:
1440 * Condition code
1441 *
1442 * Arguments:
1443 * dev - pointer to device information
1444 *
1445 * Functional Description:
1446 * This function brings the adapter to an operational state.
1447 *
1448 * Return Codes:
1449 * 0 - Adapter was successfully opened
1450 *   negative errno	- Could not register the IRQ or adapter initialization failed
1451 *
1452 * Assumptions:
1453 * This routine should only be called for a device that was
1454 * initialized successfully.
1455 *
1456 * Side Effects:
1457 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1458 * if the open is successful.
1459 */
1460
1461 static int dfx_open(struct net_device *dev)
1462 {
1463 DFX_board_t *bp = netdev_priv(dev);
1464 int ret;
1465
1466 DBG_printk("In dfx_open...\n");
1467
1468 /* Register IRQ - support shared interrupts by passing device ptr */
1469
1470 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1471 dev);
1472 if (ret) {
1473 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1474 return ret;
1475 }
1476
1477 /*
1478 * Set current address to factory MAC address
1479 *
1480 * Note: We've already done this step in dfx_driver_init.
1481 * However, it's possible that a user has set a node
1482 * address override, then closed and reopened the
1483 * adapter. Unless we reset the device address field
1484 * now, we'll continue to use the existing modified
1485 * address.
1486 */
1487
1488 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1489
1490 /* Clear local unicast/multicast address tables and counts */
1491
1492 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1493 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1494 bp->uc_count = 0;
1495 bp->mc_count = 0;
1496
1497 /* Disable promiscuous filter settings */
1498
1499 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1500 bp->group_prom = PI_FSTATE_K_BLOCK;
1501
1502 spin_lock_init(&bp->lock);
1503
1504 /* Reset and initialize adapter */
1505
1506 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
1507 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1508 {
1509 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1510 free_irq(dev->irq, dev);
1511 return -EAGAIN;
1512 }
1513
1514 /* Set device structure info */
1515 netif_start_queue(dev);
1516 return 0;
1517 }
1518
1519
1520 /*
1521 * =============
1522 * = dfx_close =
1523 * =============
1524 *
1525 * Overview:
1526 * Closes the device/module.
1527 *
1528 * Returns:
1529 * Condition code
1530 *
1531 * Arguments:
1532 * dev - pointer to device information
1533 *
1534 * Functional Description:
1535 * This routine closes the adapter and brings it to a safe state.
1536 * The interrupt service routine is deregistered with the OS.
1537 * The adapter can be opened again with another call to dfx_open().
1538 *
1539 * Return Codes:
1540 * Always return 0.
1541 *
1542 * Assumptions:
1543 * No further requests for this adapter are made after this routine is
1544 * called. dfx_open() can be called to reset and reinitialize the
1545 * adapter.
1546 *
1547 * Side Effects:
1548 * Adapter should be in DMA_UNAVAILABLE state upon completion of this
1549 * routine.
1550 */
1551
1552 static int dfx_close(struct net_device *dev)
1553 {
1554 DFX_board_t *bp = netdev_priv(dev);
1555
1556 DBG_printk("In dfx_close...\n");
1557
1558 /* Disable PDQ interrupts first */
1559
1560 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1561
1562 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1563
1564 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1565
1566 /*
1567 * Flush any pending transmit buffers
1568 *
1569 * Note: It's important that we flush the transmit buffers
1570 * BEFORE we clear our copy of the Type 2 register.
1571 * Otherwise, we'll have no idea how many buffers
1572 * we need to free.
1573 */
1574
1575 dfx_xmt_flush(bp);
1576
1577 /*
1578 * Clear Type 1 and Type 2 registers after adapter reset
1579 *
1580 * Note: Even though we're closing the adapter, it's
1581 * possible that an interrupt will occur after
1582 * dfx_close is called. Without some assurance to
1583 * the contrary we want to make sure that we don't
1584 * process receive and transmit LLC frames and update
1585 * the Type 2 register with bad information.
1586 */
1587
1588 bp->cmd_req_reg.lword = 0;
1589 bp->cmd_rsp_reg.lword = 0;
1590 bp->rcv_xmt_reg.lword = 0;
1591
1592 /* Clear consumer block for the same reason given above */
1593
1594 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1595
1596 	/* Release all dynamically allocated skbs in the receive ring. */
1597
1598 dfx_rcv_flush(bp);
1599
1600 /* Clear device structure flags */
1601
1602 netif_stop_queue(dev);
1603
1604 /* Deregister (free) IRQ */
1605
1606 free_irq(dev->irq, dev);
1607
1608 return 0;
1609 }
1610
1611
1612 /*
1613 * ======================
1614 * = dfx_int_pr_halt_id =
1615 * ======================
1616 *
1617 * Overview:
1618 * Displays halt id's in string form.
1619 *
1620 * Returns:
1621 * None
1622 *
1623 * Arguments:
1624 * bp - pointer to board information
1625 *
1626 * Functional Description:
1627 * Determine current halt id and display appropriate string.
1628 *
1629 * Return Codes:
1630 * None
1631 *
1632 * Assumptions:
1633 * None
1634 *
1635 * Side Effects:
1636 * None
1637 */
1638
1639 static void dfx_int_pr_halt_id(DFX_board_t *bp)
1640 {
1641 PI_UINT32 port_status; /* PDQ port status register value */
1642 PI_UINT32 halt_id; /* PDQ port status halt ID */
1643
1644 /* Read the latest port status */
1645
1646 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1647
1648 /* Display halt state transition information */
1649
1650 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1651 switch (halt_id)
1652 {
1653 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1654 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1655 break;
1656
1657 case PI_HALT_ID_K_PARITY_ERROR:
1658 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1659 break;
1660
1661 case PI_HALT_ID_K_HOST_DIR_HALT:
1662 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1663 break;
1664
1665 case PI_HALT_ID_K_SW_FAULT:
1666 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1667 break;
1668
1669 case PI_HALT_ID_K_HW_FAULT:
1670 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1671 break;
1672
1673 case PI_HALT_ID_K_PC_TRACE:
1674 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1675 break;
1676
1677 case PI_HALT_ID_K_DMA_ERROR:
1678 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1679 break;
1680
1681 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1682 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1683 break;
1684
1685 case PI_HALT_ID_K_BUS_EXCEPTION:
1686 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1687 break;
1688
1689 default:
1690 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1691 break;
1692 }
1693 }
1694
1695
1696 /*
1697 * ==========================
1698 * = dfx_int_type_0_process =
1699 * ==========================
1700 *
1701 * Overview:
1702 * Processes Type 0 interrupts.
1703 *
1704 * Returns:
1705 * None
1706 *
1707 * Arguments:
1708 * bp - pointer to board information
1709 *
1710 * Functional Description:
1711 * Processes all enabled Type 0 interrupts. If the reason for the interrupt
1712 * is a serious fault on the adapter, then an error message is displayed
1713 * and the adapter is reset.
1714 *
1715 * One tricky potential timing window is the rapid succession of "link avail"
1716 * "link unavail" state change interrupts. The acknowledgement of the Type 0
1717 * interrupt must be done before reading the state from the Port Status
1718 * register. This is true because a state change could occur after reading
1719 * the data, but before acknowledging the interrupt. If this state change
1720 * does happen, it would be lost because the driver is using the old state,
1721 * and it will never know about the new state because it subsequently
1722 * acknowledges the state change interrupt.
1723 *
1724 * INCORRECT CORRECT
1725 * read type 0 int reasons read type 0 int reasons
1726 * read adapter state ack type 0 interrupts
1727 * ack type 0 interrupts read adapter state
1728 * ... process interrupt ... ... process interrupt ...
1729 *
1730 * Return Codes:
1731 * None
1732 *
1733 * Assumptions:
1734 * None
1735 *
1736 * Side Effects:
1737 * An adapter reset may occur if the adapter has any Type 0 error interrupts
1738 * or if the port status indicates that the adapter is halted. The driver
1739 * is responsible for reinitializing the adapter with the current CAM
1740 * contents and adapter filter settings.
1741 */
1742
1743 static void dfx_int_type_0_process(DFX_board_t *bp)
1744
1745 {
1746 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
1747 PI_UINT32 state; /* current adap state (from port status) */
1748
1749 /*
1750 * Read host interrupt Type 0 register to determine which Type 0
1751 * interrupts are pending. Immediately write it back out to clear
1752 * those interrupts.
1753 */
1754
1755 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1756 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
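	/*
	 * Writing the status value straight back acknowledges (clears) the
	 * reported Type 0 interrupts before the adapter state is read further
	 * below, which is the ordering required to avoid the state-change race
	 * described in this routine's header comment.
	 */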
1757
1758 /* Check for Type 0 error interrupts */
1759
1760 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1761 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1762 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1763 {
1764 /* Check for Non-Existent Memory error */
1765
1766 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1767 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1768
1769 /* Check for Packet Memory Parity error */
1770
1771 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1772 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1773
1774 /* Check for Host Bus Parity error */
1775
1776 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1777 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1778
1779 /* Reset adapter and bring it back on-line */
1780
1781 bp->link_available = PI_K_FALSE; /* link is no longer available */
1782 bp->reset_type = 0; /* rerun on-board diagnostics */
1783 printk("%s: Resetting adapter...\n", bp->dev->name);
1784 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1785 {
1786 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1787 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1788 return;
1789 }
1790 printk("%s: Adapter reset successful!\n", bp->dev->name);
1791 return;
1792 }
1793
1794 /* Check for transmit flush interrupt */
1795
1796 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1797 {
1798 /* Flush any pending xmt's and acknowledge the flush interrupt */
1799
1800 bp->link_available = PI_K_FALSE; /* link is no longer available */
1801 dfx_xmt_flush(bp); /* flush any outstanding packets */
1802 (void) dfx_hw_port_ctrl_req(bp,
1803 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1804 0,
1805 0,
1806 NULL);
1807 }
1808
1809 /* Check for adapter state change */
1810
1811 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1812 {
1813 /* Get latest adapter state */
1814
1815 state = dfx_hw_adap_state_rd(bp); /* get adapter state */
1816 if (state == PI_STATE_K_HALTED)
1817 {
1818 /*
1819 * Adapter has transitioned to HALTED state, try to reset
1820 * adapter to bring it back on-line. If reset fails,
1821 * leave the adapter in the broken state.
1822 */
1823
1824 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1825 dfx_int_pr_halt_id(bp); /* display halt id as string */
1826
1827 /* Reset adapter and bring it back on-line */
1828
1829 bp->link_available = PI_K_FALSE; /* link is no longer available */
1830 bp->reset_type = 0; /* rerun on-board diagnostics */
1831 printk("%s: Resetting adapter...\n", bp->dev->name);
1832 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1833 {
1834 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1835 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1836 return;
1837 }
1838 printk("%s: Adapter reset successful!\n", bp->dev->name);
1839 }
1840 else if (state == PI_STATE_K_LINK_AVAIL)
1841 {
1842 bp->link_available = PI_K_TRUE; /* set link available flag */
1843 }
1844 }
1845 }
1846
1847
1848 /*
1849 * ==================
1850 * = dfx_int_common =
1851 * ==================
1852 *
1853 * Overview:
1854 * Interrupt service routine (ISR)
1855 *
1856 * Returns:
1857 * None
1858 *
1859 * Arguments:
1860 * bp - pointer to board information
1861 *
1862 * Functional Description:
1863 * This is the ISR which processes incoming adapter interrupts.
1864 *
1865 * Return Codes:
1866 * None
1867 *
1868 * Assumptions:
1869 * This routine assumes PDQ interrupts have not been disabled.
1870 * When interrupts are disabled at the PDQ, the Port Status register
1871 * is automatically cleared. This routine uses the Port Status
1872 * register value to determine whether a Type 0 interrupt occurred,
1873 * so it's important that adapter interrupts are not normally
1874 * enabled/disabled at the PDQ.
1875 *
1876 * It's vital that this routine is NOT reentered for the
1877 * same board and that the OS is not in another section of
1878 * code (eg. dfx_xmt_queue_pkt) for the same board on a
1879 * different thread.
1880 *
1881 * Side Effects:
1882 * Pending interrupts are serviced. Depending on the type of
1883 * interrupt, acknowledging and clearing the interrupt at the
1884 * PDQ involves writing a register to clear the interrupt bit
1885 * or updating completion indices.
1886 */
1887
1888 static void dfx_int_common(struct net_device *dev)
1889 {
1890 DFX_board_t *bp = netdev_priv(dev);
1891 PI_UINT32 port_status; /* Port Status register */
1892
1893 /* Process xmt interrupts - frequent case, so always call this routine */
1894
1895 if(dfx_xmt_done(bp)) /* free consumed xmt packets */
1896 netif_wake_queue(dev);
1897
1898 /* Process rcv interrupts - frequent case, so always call this routine */
1899
1900 dfx_rcv_queue_process(bp); /* service received LLC frames */
1901
1902 /*
1903 * Transmit and receive producer and completion indices are updated on the
1904 * adapter by writing to the Type 2 Producer register. Since the frequent
1905 * case is that we'll be processing either LLC transmit or receive buffers,
1906 * we'll optimize I/O writes by doing a single register write here.
1907 */
1908
1909 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1910
1911 /* Read PDQ Port Status register to find out which interrupts need processing */
1912
1913 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1914
1915 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1916
1917 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1918 dfx_int_type_0_process(bp); /* process Type 0 interrupts */
1919 }
1920
1921
1922 /*
1923 * =================
1924 * = dfx_interrupt =
1925 * =================
1926 *
1927 * Overview:
1928 * Interrupt processing routine
1929 *
1930 * Returns:
1931 * Whether a valid interrupt was seen.
1932 *
1933 * Arguments:
1934 * irq - interrupt vector
1935 * dev_id - pointer to device information
1936 *
1937 * Functional Description:
1938 * This routine calls the interrupt processing routine for this adapter. It
1939 * disables and reenables adapter interrupts, as appropriate. We can support
1940 * shared interrupts since the incoming dev_id pointer provides our device
1941 * structure context.
1942 *
1943 * Return Codes:
1944 * IRQ_HANDLED - an IRQ was handled.
1945 * IRQ_NONE - no IRQ was handled.
1946 *
1947 * Assumptions:
1948 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1949 * on Intel-based systems) is done by the operating system outside this
1950 * routine.
1951 *
1952 * System interrupts are enabled through this call.
1953 *
1954 * Side Effects:
1955 * Interrupts are disabled, then reenabled at the adapter.
1956 */
1957
1958 static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1959 {
1960 struct net_device *dev = dev_id;
1961 DFX_board_t *bp = netdev_priv(dev);
1962 struct device *bdev = bp->bus_dev;
1963 int dfx_bus_pci = dev_is_pci(bdev);
1964 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1965 int dfx_bus_tc = DFX_BUS_TC(bdev);
1966
1967 /* Service adapter interrupts */
1968
1969 if (dfx_bus_pci) {
1970 u32 status;
1971
1972 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1973 if (!(status & PFI_STATUS_M_PDQ_INT))
1974 return IRQ_NONE;
1975
1976 spin_lock(&bp->lock);
1977
1978 /* Disable PDQ-PFI interrupts at PFI */
1979 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1980 PFI_MODE_M_DMA_ENB);
1981
1982 /* Call interrupt service routine for this adapter */
1983 dfx_int_common(dev);
1984
1985 /* Clear PDQ interrupt status bit and reenable interrupts */
1986 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1987 PFI_STATUS_M_PDQ_INT);
1988 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1989 (PFI_MODE_M_PDQ_INT_ENB |
1990 PFI_MODE_M_DMA_ENB));
1991
1992 spin_unlock(&bp->lock);
1993 }
1994 if (dfx_bus_eisa) {
1995 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1996 u8 status;
1997
1998 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1999 if (!(status & PI_CONFIG_STAT_0_M_PEND))
2000 return IRQ_NONE;
2001
2002 spin_lock(&bp->lock);
2003
2004 /* Disable interrupts at the ESIC */
2005 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
2006 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2007
2008 /* Call interrupt service routine for this adapter */
2009 dfx_int_common(dev);
2010
2011 /* Reenable interrupts at the ESIC */
2012 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2013 status |= PI_CONFIG_STAT_0_M_INT_ENB;
2014 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2015
2016 spin_unlock(&bp->lock);
2017 }
2018 if (dfx_bus_tc) {
2019 u32 status;
2020
2021 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
2022 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
2023 PI_PSTATUS_M_XMT_DATA_PENDING |
2024 PI_PSTATUS_M_SMT_HOST_PENDING |
2025 PI_PSTATUS_M_UNSOL_PENDING |
2026 PI_PSTATUS_M_CMD_RSP_PENDING |
2027 PI_PSTATUS_M_CMD_REQ_PENDING |
2028 PI_PSTATUS_M_TYPE_0_PENDING)))
2029 return IRQ_NONE;
2030
2031 spin_lock(&bp->lock);
2032
2033 /* Call interrupt service routine for this adapter */
2034 dfx_int_common(dev);
2035
2036 spin_unlock(&bp->lock);
2037 }
2038
2039 return IRQ_HANDLED;
2040 }
2041
2042
2043 /*
2044 * =====================
2045 * = dfx_ctl_get_stats =
2046 * =====================
2047 *
2048 * Overview:
2049 * Get statistics for FDDI adapter
2050 *
2051 * Returns:
2052 * Pointer to FDDI statistics structure
2053 *
2054 * Arguments:
2055 * dev - pointer to device information
2056 *
2057 * Functional Description:
2058 * Gets current MIB objects from adapter, then
2059 * returns FDDI statistics structure as defined
2060 * in if_fddi.h.
2061 *
2062 * Note: Since the FDDI statistics structure is
2063 * still new and the device structure doesn't
2064 * have an FDDI-specific get statistics handler,
2065 * we'll return the FDDI statistics structure as
2066 * a pointer to an Ethernet statistics structure.
2067 * That way, at least the first part of the statistics
2068 * structure can be decoded properly, and it allows
2069 * "smart" applications to perform a second cast to
2070 * decode the FDDI-specific statistics.
2071 *
2072 * We'll have to pay attention to this routine as the
2073 * device structure becomes more mature and LAN media
2074 * independent.
2075 *
2076 * Return Codes:
2077 * None
2078 *
2079 * Assumptions:
2080 * None
2081 *
2082 * Side Effects:
2083 * None
2084 */
2085
2086 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2087 {
2088 DFX_board_t *bp = netdev_priv(dev);
2089
2090 /* Fill the bp->stats structure with driver-maintained counters */
2091
2092 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2093 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2094 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2095 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2096 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2097 bp->rcv_frame_status_errors +
2098 bp->rcv_length_errors;
2099 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2100 bp->stats.gen.rx_dropped = bp->rcv_discards;
2101 bp->stats.gen.tx_dropped = bp->xmt_discards;
2102 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2103 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
2104
2105 /* Get FDDI SMT MIB objects */
2106
2107 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2108 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2109 return (struct net_device_stats *)&bp->stats;
2110
2111 /* Fill the bp->stats structure with the SMT MIB object values */
2112
2113 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2114 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2115 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2116 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2117 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2118 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2119 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2120 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2121 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2122 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2123 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2124 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2125 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2126 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2127 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2128 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2129 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2130 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2131 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2132 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2133 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2134 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2135 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2136 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2137 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2138 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2139 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2140 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2141 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2142 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2143 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2144 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2145 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2146 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2147 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2148 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2149 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2150 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2151 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2152 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2153 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2154 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2155 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2156 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2157 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2158 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2159 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2160 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2161 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2162 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2163 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2164 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2165 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2166 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2167 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2168 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2169 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2170 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2171 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2172 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2173 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2174 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2175 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2176 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2177 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2178 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2179 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2180 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2181 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2182 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2183 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2184 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2185 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2186 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2187 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2188 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2189 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2190 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2191 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2192 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2193 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2194 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2195 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2196 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2197 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2198 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2199 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2200 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2201 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2202 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2203 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2204 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2205
2206 /* Get FDDI counters */
2207
2208 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2209 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2210 return (struct net_device_stats *)&bp->stats;
2211
2212 /* Fill the bp->stats structure with the FDDI counter values */
2213
2214 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2215 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2216 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2217 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2218 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2219 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2220 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2221 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2222 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2223 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2224 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2225
2226 return (struct net_device_stats *)&bp->stats;
2227 }
2228
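/*
 * Illustrative sketch (not part of the driver): the "second cast" described
 * in the dfx_ctl_get_stats() header above.  The returned pointer really
 * addresses bp->stats, a struct fddi_statistics whose first member is the
 * generic net_device_stats block, so a caller that knows it is dealing with
 * an FDDI device could reach the SMT/MAC/PORT counters like this:
 *
 *	struct net_device_stats *gen = dfx_ctl_get_stats(dev);
 *	struct fddi_statistics *fddi = (struct fddi_statistics *)gen;
 *
 *	pr_info("%s: SMT op version %u, ECM state %u\n",
 *		dev->name, fddi->smt_op_version_id, fddi->smt_ecm_state);
 */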
2229
2230 /*
2231 * ==============================
2232 * = dfx_ctl_set_multicast_list =
2233 * ==============================
2234 *
2235 * Overview:
2236 * Enable/Disable LLC frame promiscuous mode reception
2237 * on the adapter and/or update multicast address table.
2238 *
2239 * Returns:
2240 * None
2241 *
2242 * Arguments:
2243 * dev - pointer to device information
2244 *
2245 * Functional Description:
2246 * This routine follows a fairly simple algorithm for setting the
2247 * adapter filters and CAM:
2248 *
2249 * if IFF_PROMISC flag is set
2250 * enable LLC individual/group promiscuous mode
2251 * else
2252 * disable LLC individual/group promiscuous mode
2253 * if number of incoming multicast addresses >
2254 * (CAM max size - number of unicast addresses in CAM)
2255 * enable LLC group promiscuous mode
2256 * set driver-maintained multicast address count to zero
2257 * else
2258 * disable LLC group promiscuous mode
2259 * set driver-maintained multicast address count to incoming count
2260 * update adapter CAM
2261 * update adapter filters
2262 *
2263 * Return Codes:
2264 * None
2265 *
2266 * Assumptions:
2267 * Multicast addresses are presented in canonical (LSB) format.
2268 *
2269 * Side Effects:
2270 * On-board adapter CAM and filters are updated.
2271 */
2272
2273 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2274 {
2275 DFX_board_t *bp = netdev_priv(dev);
2276 int i; /* used as index in for loop */
2277 struct netdev_hw_addr *ha;
2278
2279 /* Enable LLC frame promiscuous mode, if necessary */
2280
2281 if (dev->flags & IFF_PROMISC)
2282 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
2283
2284 /* Else, update multicast address table */
2285
2286 else
2287 {
2288 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
2289 /*
2290 * Check whether incoming multicast address count exceeds table size
2291 *
2292 * Note: The adapters utilize an on-board 64 entry CAM for
2293 * supporting perfect filtering of multicast packets
2294 * and bridge functions when adding unicast addresses.
2295 * There is no hash function available. To support
2296 * additional multicast addresses, the all multicast
2297 * filter (LLC group promiscuous mode) must be enabled.
2298 *
2299 * The firmware reserves two CAM entries for SMT-related
2300 * multicast addresses, which leaves 62 entries available.
2301 * The following code ensures that we're not being asked
2302 * to add more than 62 addresses to the CAM. If we are,
2303 * the driver will enable the all multicast filter.
2304 * Should the number of multicast addresses drop below
2305 * the high water mark, the filter will be disabled and
2306 * perfect filtering will be used.
2307 */
2308
2309 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2310 {
2311 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2312 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2313 }
2314 else
2315 {
2316 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2317 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
2318 }
2319
2320 /* Copy addresses to multicast address table, then update adapter CAM */
2321
2322 i = 0;
2323 netdev_for_each_mc_addr(ha, dev)
2324 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2325 ha->addr, FDDI_K_ALEN);
2326
2327 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2328 {
2329 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2330 }
2331 else
2332 {
2333 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2334 }
2335 }
2336
2337 /* Update adapter filters */
2338
2339 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2340 {
2341 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2342 }
2343 else
2344 {
2345 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2346 }
2347 }
2348
2349
2350 /*
2351 * ===========================
2352 * = dfx_ctl_set_mac_address =
2353 * ===========================
2354 *
2355 * Overview:
2356 * Add node address override (unicast address) to adapter
2357 * CAM and update dev_addr field in device table.
2358 *
2359 * Returns:
2360 * None
2361 *
2362 * Arguments:
2363 * dev - pointer to device information
2364 * addr - pointer to sockaddr structure containing unicast address to add
2365 *
2366 * Functional Description:
2367 * The adapter supports node address overrides by adding one or more
2368 * unicast addresses to the adapter CAM. This is similar to adding
2369 * multicast addresses. In this routine we'll update the driver and
2370 * device structures with the new address, then update the adapter CAM
2371 * to ensure that the adapter will copy and strip frames destined and
2372 * sourced by that address.
2373 *
2374 * Return Codes:
2375 * Always returns zero.
2376 *
2377 * Assumptions:
2378 * The address pointed to by addr->sa_data is a valid unicast
2379 * address and is presented in canonical (LSB) format.
2380 *
2381 * Side Effects:
2382 * On-board adapter CAM is updated. On-board adapter filters
2383 * may be updated.
2384 */
2385
2386 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2387 {
2388 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2389 DFX_board_t *bp = netdev_priv(dev);
2390
2391 /* Copy unicast address to driver-maintained structs and update count */
2392
2393 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
2394 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
2395 bp->uc_count = 1;
2396
2397 /*
2398 * Verify we're not exceeding the CAM size by adding unicast address
2399 *
2400 * Note: It's possible that before entering this routine we've
2401 * already filled the CAM with 62 multicast addresses.
2402 * Since we need to place the node address override into
2403 * the CAM, we have to check to see that we're not
2404 * exceeding the CAM size. If we are, we have to enable
2405 * the LLC group (multicast) promiscuous mode filter as
2406 * in dfx_ctl_set_multicast_list.
2407 */
2408
2409 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2410 {
2411 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2412 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2413
2414 /* Update adapter filters */
2415
2416 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2417 {
2418 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2419 }
2420 else
2421 {
2422 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2423 }
2424 }
2425
2426 /* Update adapter CAM with new unicast address */
2427
2428 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2429 {
2430 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2431 }
2432 else
2433 {
2434 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2435 }
2436 return 0; /* always return zero */
2437 }
2438
2439
2440 /*
2441 * ======================
2442 * = dfx_ctl_update_cam =
2443 * ======================
2444 *
2445 * Overview:
2446 * Procedure to update adapter CAM (Content Addressable Memory)
2447 * with desired unicast and multicast address entries.
2448 *
2449 * Returns:
2450 * Condition code
2451 *
2452 * Arguments:
2453 * bp - pointer to board information
2454 *
2455 * Functional Description:
2456 * Updates adapter CAM with current contents of board structure
2457 * unicast and multicast address tables. Since there are only 62
2458 * free entries in CAM, this routine ensures that the command
2459 * request buffer is not overrun.
2460 *
2461 * Return Codes:
2462 * DFX_K_SUCCESS - Request succeeded
2463 * DFX_K_FAILURE - Request failed
2464 *
2465 * Assumptions:
2466 * All addresses being added (unicast and multicast) are in canonical
2467 * order.
2468 *
2469 * Side Effects:
2470 * On-board adapter CAM is updated.
2471 */
2472
2473 static int dfx_ctl_update_cam(DFX_board_t *bp)
2474 {
2475 int i; /* used as index */
2476 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
2477
2478 /*
2479 * Fill in command request information
2480 *
2481 * Note: Even though both the unicast and multicast address
2482 * table entries are stored as contiguous 6 byte entries,
2483 * the firmware address filter set command expects each
2484 * entry to be two longwords (8 bytes total). We must be
2485 * careful to only copy the six bytes of each unicast and
2486 * multicast table entry into each command entry. This
2487 * is also why we must first clear the entire command
2488 * request buffer.
2489 */
2490
2491 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
2492 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2493 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2494
2495 /* Now add unicast addresses to command request buffer, if any */
2496
2497 for (i=0; i < (int)bp->uc_count; i++)
2498 {
2499 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2500 {
2501 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2502 p_addr++; /* point to next command entry */
2503 }
2504 }
2505
2506 /* Now add multicast addresses to command request buffer, if any */
2507
2508 for (i=0; i < (int)bp->mc_count; i++)
2509 {
2510 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2511 {
2512 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2513 p_addr++; /* point to next command entry */
2514 }
2515 }
2516
2517 /* Issue command to update adapter CAM, then return */
2518
2519 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2520 return DFX_K_FAILURE;
2521 return DFX_K_SUCCESS;
2522 }
2523
2524
2525 /*
2526 * ==========================
2527 * = dfx_ctl_update_filters =
2528 * ==========================
2529 *
2530 * Overview:
2531 * Procedure to update adapter filters with desired
2532 * filter settings.
2533 *
2534 * Returns:
2535 * Condition code
2536 *
2537 * Arguments:
2538 * bp - pointer to board information
2539 *
2540 * Functional Description:
2541 * Enables or disables filter using current filter settings.
2542 *
2543 * Return Codes:
2544 * DFX_K_SUCCESS - Request succeeded.
2545 * DFX_K_FAILURE - Request failed.
2546 *
2547 * Assumptions:
2548 * We must always pass up packets destined to the broadcast
2549 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2550 * broadcast filter enabled.
2551 *
2552 * Side Effects:
2553 * On-board adapter filters are updated.
2554 */
2555
2556 static int dfx_ctl_update_filters(DFX_board_t *bp)
2557 {
2558 int i = 0; /* used as index */
2559
2560 /* Fill in command request information */
2561
2562 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2563
2564 /* Initialize Broadcast filter - * ALWAYS ENABLED * */
2565
2566 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2567 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2568
2569 /* Initialize LLC Individual/Group Promiscuous filter */
2570
2571 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2572 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2573
2574 /* Initialize LLC Group Promiscuous filter */
2575
2576 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2577 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2578
2579 /* Terminate the item code list */
2580
2581 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2582
2583 /* Issue command to update adapter filters, then return */
2584
2585 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2586 return DFX_K_FAILURE;
2587 return DFX_K_SUCCESS;
2588 }
2589
2590
2591 /*
2592 * ======================
2593 * = dfx_hw_dma_cmd_req =
2594 * ======================
2595 *
2596 * Overview:
2597 * Sends PDQ DMA command to adapter firmware
2598 *
2599 * Returns:
2600 * Condition code
2601 *
2602 * Arguments:
2603 * bp - pointer to board information
2604 *
2605 * Functional Description:
2606 * The command request and response buffers are posted to the adapter in the manner
2607 * described in the PDQ Port Specification:
2608 *
2609 * 1. Command Response Buffer is posted to adapter.
2610 * 2. Command Request Buffer is posted to adapter.
2611 * 3. Command Request consumer index is polled until it indicates that request
2612 * buffer has been DMA'd to adapter.
2613 * 4. Command Response consumer index is polled until it indicates that response
2614 * buffer has been DMA'd from adapter.
2615 *
2616 * This ordering ensures that a response buffer is already available for the firmware
2617 * to use once it's done processing the request buffer.
2618 *
2619 * Return Codes:
2620 * DFX_K_SUCCESS - DMA command succeeded
2621 * DFX_K_OUTSTATE - Adapter is NOT in proper state
2622 * DFX_K_HW_TIMEOUT - DMA command timed out
2623 *
2624 * Assumptions:
2625 * Command request buffer has already been filled with desired DMA command.
2626 *
2627 * Side Effects:
2628 * None
2629 */
2630
2631 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2632 {
2633 int status; /* adapter status */
2634 int timeout_cnt; /* used in for loops */
2635
2636 /* Make sure the adapter is in a state that we can issue the DMA command in */
2637
2638 status = dfx_hw_adap_state_rd(bp);
2639 if ((status == PI_STATE_K_RESET) ||
2640 (status == PI_STATE_K_HALTED) ||
2641 (status == PI_STATE_K_DMA_UNAVAIL) ||
2642 (status == PI_STATE_K_UPGRADE))
2643 return DFX_K_OUTSTATE;
2644
2645 /* Put response buffer on the command response queue */
2646
2647 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2648 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2649 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2650
2651 /* Bump (and wrap) the producer index and write out to register */
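	/*
	 * (The AND with PI_CMD_RSP_K_NUM_ENTRIES-1 wraps the index correctly
	 * only because the command response ring size is a power of two.)
	 */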
2652
2653 bp->cmd_rsp_reg.index.prod += 1;
2654 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2655 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2656
2657 /* Put request buffer on the command request queue */
2658
2659 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2660 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2661 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2662
2663 /* Bump (and wrap) the producer index and write out to register */
2664
2665 bp->cmd_req_reg.index.prod += 1;
2666 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2667 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2668
2669 /*
2670 * Here we wait for the command request consumer index to be equal
2671 * to the producer, indicating that the adapter has DMAed the request.
2672 */
2673
2674 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2675 {
2676 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2677 break;
2678 udelay(100); /* wait for 100 microseconds */
2679 }
2680 if (timeout_cnt == 0)
2681 return DFX_K_HW_TIMEOUT;
2682
2683 /* Bump (and wrap) the completion index and write out to register */
2684
2685 bp->cmd_req_reg.index.comp += 1;
2686 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2687 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2688
2689 /*
2690 * Here we wait for the command response consumer index to be equal
2691 * to the producer, indicating that the adapter has DMAed the response.
2692 */
2693
2694 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2695 {
2696 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2697 break;
2698 udelay(100); /* wait for 100 microseconds */
2699 }
2700 if (timeout_cnt == 0)
2701 return DFX_K_HW_TIMEOUT;
2702
2703 /* Bump (and wrap) the completion index and write out to register */
2704
2705 bp->cmd_rsp_reg.index.comp += 1;
2706 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2707 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2708 return DFX_K_SUCCESS;
2709 }
2710
2711
2712 /*
2713 * ========================
2714 * = dfx_hw_port_ctrl_req =
2715 * ========================
2716 *
2717 * Overview:
2718 * Sends PDQ port control command to adapter firmware
2719 *
2720 * Returns:
2721 * Host data register value in host_data if ptr is not NULL
2722 *
2723 * Arguments:
2724 * bp - pointer to board information
2725 * command - port control command
2726 * data_a - port data A register value
2727 * data_b - port data B register value
2728 * host_data - ptr to host data register value
2729 *
2730 * Functional Description:
2731 * Send generic port control command to adapter by writing
2732 * to various PDQ port registers, then polling for completion.
2733 *
2734 * Return Codes:
2735 * DFX_K_SUCCESS - port control command succeeded
2736 * DFX_K_HW_TIMEOUT - port control command timed out
2737 *
2738 * Assumptions:
2739 * None
2740 *
2741 * Side Effects:
2742 * None
2743 */
2744
2745 static int dfx_hw_port_ctrl_req(
2746 DFX_board_t *bp,
2747 PI_UINT32 command,
2748 PI_UINT32 data_a,
2749 PI_UINT32 data_b,
2750 PI_UINT32 *host_data
2751 )
2752
2753 {
2754 PI_UINT32 port_cmd; /* Port Control command register value */
2755 int timeout_cnt; /* used in for loops */
2756
2757 /* Set Command Error bit in command longword */
2758
2759 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
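	/*
	 * Note: the CMD_ERROR bit doubles as a busy/handshake flag here; the
	 * host sets it when issuing the command and the loop below polls until
	 * the adapter clears it, which is taken as command completion.
	 */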
2760
2761 /* Issue port command to the adapter */
2762
2763 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2764 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2765 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2766
2767 /* Now wait for command to complete */
2768
2769 if (command == PI_PCTRL_M_BLAST_FLASH)
2770 timeout_cnt = 600000; /* set command timeout count to 60 seconds */
2771 else
2772 timeout_cnt = 20000; /* set command timeout count to 2 seconds */
2773
2774 for (; timeout_cnt > 0; timeout_cnt--)
2775 {
2776 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2777 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2778 break;
2779 udelay(100); /* wait for 100 microseconds */
2780 }
2781 if (timeout_cnt == 0)
2782 return DFX_K_HW_TIMEOUT;
2783
2784 /*
2785 	 * If the caller supplied a non-NULL host_data pointer, return the
2786 	 * contents of the HOST_DATA register through it.
2788 */
2789
2790 if (host_data != NULL)
2791 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2792 return DFX_K_SUCCESS;
2793 }
2794
2795
2796 /*
2797 * =====================
2798 * = dfx_hw_adap_reset =
2799 * =====================
2800 *
2801 * Overview:
2802 * Resets adapter
2803 *
2804 * Returns:
2805 * None
2806 *
2807 * Arguments:
2808 * bp - pointer to board information
2809 * type - type of reset to perform
2810 *
2811 * Functional Description:
2812 * Issue soft reset to adapter by writing to PDQ Port Reset
2813 * register. Use incoming reset type to tell adapter what
2814 * kind of reset operation to perform.
2815 *
2816 * Return Codes:
2817 * None
2818 *
2819 * Assumptions:
2820 * This routine merely issues a soft reset to the adapter.
2821 * It is expected that after this routine returns, the caller
2822 * will appropriately poll the Port Status register for the
2823 * adapter to enter the proper state.
2824 *
2825 * Side Effects:
2826 * Internal adapter registers are cleared.
2827 */
2828
2829 static void dfx_hw_adap_reset(
2830 DFX_board_t *bp,
2831 PI_UINT32 type
2832 )
2833
2834 {
2835 /* Set Reset type and assert reset */
2836
2837 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
2838 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2839
2840 	/* Wait for at least 1 microsecond per the spec; we wait 20 just to be safe. */
2841
2842 udelay(20);
2843
2844 /* Deassert reset */
2845
2846 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2847 }
2848
2849
2850 /*
2851 * ========================
2852 * = dfx_hw_adap_state_rd =
2853 * ========================
2854 *
2855 * Overview:
2856 * Returns current adapter state
2857 *
2858 * Returns:
2859 * Adapter state per PDQ Port Specification
2860 *
2861 * Arguments:
2862 * bp - pointer to board information
2863 *
2864 * Functional Description:
2865 * Reads PDQ Port Status register and returns adapter state.
2866 *
2867 * Return Codes:
2868 * None
2869 *
2870 * Assumptions:
2871 * None
2872 *
2873 * Side Effects:
2874 * None
2875 */
2876
2877 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2878 {
2879 PI_UINT32 port_status; /* Port Status register value */
2880
2881 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2882 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2883 }
2884
2885
2886 /*
2887 * =====================
2888 * = dfx_hw_dma_uninit =
2889 * =====================
2890 *
2891 * Overview:
2892 * Brings adapter to DMA_UNAVAILABLE state
2893 *
2894 * Returns:
2895 * Condition code
2896 *
2897 * Arguments:
2898 * bp - pointer to board information
2899 * type - type of reset to perform
2900 *
2901 * Functional Description:
2902 * Bring adapter to DMA_UNAVAILABLE state by performing the following:
2903 * 1. Set reset type bit in Port Data A Register then reset adapter.
2904 * 2. Check that adapter is in DMA_UNAVAILABLE state.
2905 *
2906 * Return Codes:
2907 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
2908 * DFX_K_HW_TIMEOUT - adapter did not reset properly
2909 *
2910 * Assumptions:
2911 * None
2912 *
2913 * Side Effects:
2914 * Internal adapter registers are cleared.
2915 */
2916
2917 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2918 {
2919 int timeout_cnt; /* used in for loops */
2920
2921 /* Set reset type bit and reset adapter */
2922
2923 dfx_hw_adap_reset(bp, type);
2924
2925 /* Now wait for adapter to enter DMA_UNAVAILABLE state */
2926
2927 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2928 {
2929 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2930 break;
2931 udelay(100); /* wait for 100 microseconds */
2932 }
2933 if (timeout_cnt == 0)
2934 return DFX_K_HW_TIMEOUT;
2935 return DFX_K_SUCCESS;
2936 }
2937
2938 /*
2939  * Align an sk_buff's data pointer to a power-of-2 boundary.
2940 *
2941 */
2942 #ifdef DYNAMIC_BUFFERS
2943 static void my_skb_align(struct sk_buff *skb, int n)
2944 {
2945 unsigned long x = (unsigned long)skb->data;
2946 unsigned long v;
2947
2948 v = ALIGN(x, n); /* Where we want to be */
2949
2950 skb_reserve(skb, v - x);
2951 }
2952 #endif
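/*
 * Worked example (illustrative only): with skb->data at, say, 0x...1064 and
 * n = 128, ALIGN() rounds the address up to 0x...1080, so skb_reserve()
 * advances the data pointer by 0x1c (28) bytes and the buffer ends up
 * 128-byte aligned, matching the receive buffer alignment assumed elsewhere
 * in this driver.
 */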
2953
2954 /*
2955 * ================
2956 * = dfx_rcv_init =
2957 * ================
2958 *
2959 * Overview:
2960 * Produces buffers to adapter LLC Host receive descriptor block
2961 *
2962 * Returns:
2963 * None
2964 *
2965 * Arguments:
2966 * bp - pointer to board information
2967 * get_buffers - non-zero if buffers to be allocated
2968 *
2969 * Functional Description:
2970 * This routine can be called during dfx_adap_init() or during an adapter
2971 * reset. It initializes the descriptor block and produces all allocated
2972 * LLC Host queue receive buffers.
2973 *
2974 * Return Codes:
2975 * Return 0 on success or -ENOMEM if buffer allocation failed (when using
2976 * dynamic buffer allocation). If the buffer allocation failed, the
2977 * already allocated buffers will not be released and the caller should do
2978 * this.
2979 *
2980 * Assumptions:
2981 * The PDQ has been reset and the adapter and driver maintained Type 2
2982 * register indices are cleared.
2983 *
2984 * Side Effects:
2985 * Receive buffers are posted to the adapter LLC queue and the adapter
2986 * is notified.
2987 */
2988
2989 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2990 {
2991 int i, j; /* used in for loop */
2992
2993 /*
2994 	 * Since each receive buffer is a single fragment of the same length, initialize
2995 * first longword in each receive descriptor for entire LLC Host descriptor
2996 * block. Also initialize second longword in each receive descriptor with
2997 * physical address of receive buffer. We'll always allocate receive
2998 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2999 * block and produce new receive buffers by simply updating the receive
3000 * producer index.
3001 *
3002 * Assumptions:
3003 * To support all shipping versions of PDQ, the receive buffer size
3004 	 *		must be a multiple of 128 bytes and the physical address must be 128-byte
3005 * aligned. In other words, bits 0-6 of the length and address must
3006 * be zero for the following descriptor field entries to be correct on
3007 * all PDQ-based boards. We guaranteed both requirements during
3008 * driver initialization when we allocated memory for the receive buffers.
3009 */
3010
3011 if (get_buffers) {
3012 #ifdef DYNAMIC_BUFFERS
3013 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3014 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3015 {
3016 struct sk_buff *newskb;
3017 dma_addr_t dma_addr;
3018
3019 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
3020 GFP_NOIO);
3021 if (!newskb)
3022 return -ENOMEM;
3023 /*
3024 * align to 128 bytes for compatibility with
3025 * the old EISA boards.
3026 */
3027
3028 my_skb_align(newskb, 128);
3029 dma_addr = dma_map_single(bp->bus_dev,
3030 newskb->data,
3031 PI_RCV_DATA_K_SIZE_MAX,
3032 DMA_FROM_DEVICE);
3033 if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3034 dev_kfree_skb(newskb);
3035 return -ENOMEM;
3036 }
3037 bp->descr_block_virt->rcv_data[i + j].long_0 =
3038 (u32)(PI_RCV_DESCR_M_SOP |
3039 ((PI_RCV_DATA_K_SIZE_MAX /
3040 PI_ALIGN_K_RCV_DATA_BUFF) <<
3041 PI_RCV_DESCR_V_SEG_LEN));
3042 bp->descr_block_virt->rcv_data[i + j].long_1 =
3043 (u32)dma_addr;
3044
3045 /*
3046 * p_rcv_buff_va is only used inside the
3047 * kernel so we put the skb pointer here.
3048 */
3049 bp->p_rcv_buff_va[i+j] = (char *) newskb;
3050 }
3051 #else
3052 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
3053 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3054 {
3055 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
3056 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
3057 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
3058 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
3059 }
3060 #endif
3061 }
3062
3063 /* Update receive producer and Type 2 register */
3064
3065 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
3066 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3067 return 0;
3068 }
3069
3070
3071 /*
3072 * =========================
3073 * = dfx_rcv_queue_process =
3074 * =========================
3075 *
3076 * Overview:
3077 * Process received LLC frames.
3078 *
3079 * Returns:
3080 * None
3081 *
3082 * Arguments:
3083 * bp - pointer to board information
3084 *
3085 * Functional Description:
3086 * Received LLC frames are processed until there are no more consumed frames.
3087 * Once all frames are processed, the receive buffers are returned to the
3088 * adapter. Note that this algorithm fixes the length of time that can be spent
3089 * in this routine, because there are a fixed number of receive buffers to
3090 * process and buffers are not produced until this routine exits and returns
3091 * to the ISR.
3092 *
3093 * Return Codes:
3094 * None
3095 *
3096 * Assumptions:
3097 * None
3098 *
3099 * Side Effects:
3100 * None
3101 */
3102
3103 static void dfx_rcv_queue_process(
3104 DFX_board_t *bp
3105 )
3106
3107 {
3108 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3109 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
3110 u32 descr, pkt_len; /* FMC descriptor field and packet length */
3111 struct sk_buff *skb = NULL; /* pointer to a sk_buff to hold incoming packet data */
3112
3113 /* Service all consumed LLC receive frames */
3114
3115 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3116 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3117 {
3118 /* Process any errors */
3119 dma_addr_t dma_addr;
3120 int entry;
3121
3122 entry = bp->rcv_xmt_reg.index.rcv_comp;
3123 #ifdef DYNAMIC_BUFFERS
3124 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3125 #else
3126 p_buff = bp->p_rcv_buff_va[entry];
3127 #endif
3128 dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
3129 dma_sync_single_for_cpu(bp->bus_dev,
3130 dma_addr + RCV_BUFF_K_DESCR,
3131 sizeof(u32),
3132 DMA_FROM_DEVICE);
3133 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3134
3135 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3136 {
3137 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3138 bp->rcv_crc_errors++;
3139 else
3140 bp->rcv_frame_status_errors++;
3141 }
3142 else
3143 {
3144 int rx_in_place = 0;
3145
3146 /* The frame was received without errors - verify packet length */
3147
3148 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3149 pkt_len -= 4; /* subtract 4 byte CRC */
3150 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3151 bp->rcv_length_errors++;
3152 else{
3153 #ifdef DYNAMIC_BUFFERS
3154 struct sk_buff *newskb = NULL;
3155
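				/*
				 * Copybreak: frames larger than SKBUFF_RX_COPYBREAK are
				 * handed up in the original DMA buffer and a freshly
				 * mapped skb takes its place in the ring; smaller frames
				 * are instead copied into a new small skb further below
				 * so the large receive buffer can be recycled.
				 */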
3156 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3157 dma_addr_t new_dma_addr;
3158
3159 newskb = netdev_alloc_skb(bp->dev,
3160 NEW_SKB_SIZE);
3161 if (newskb){
3162 my_skb_align(newskb, 128);
3163 new_dma_addr = dma_map_single(
3164 bp->bus_dev,
3165 newskb->data,
3166 PI_RCV_DATA_K_SIZE_MAX,
3167 DMA_FROM_DEVICE);
3168 if (dma_mapping_error(
3169 bp->bus_dev,
3170 new_dma_addr)) {
3171 dev_kfree_skb(newskb);
3172 newskb = NULL;
3173 }
3174 }
3175 if (newskb) {
3176 rx_in_place = 1;
3177
3178 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3179 dma_unmap_single(bp->bus_dev,
3180 dma_addr,
3181 PI_RCV_DATA_K_SIZE_MAX,
3182 DMA_FROM_DEVICE);
3183 skb_reserve(skb, RCV_BUFF_K_PADDING);
3184 bp->p_rcv_buff_va[entry] = (char *)newskb;
3185 bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
3186 }
3187 }
3188 if (!newskb)
3189 #endif
3190 /* Alloc new buffer to pass up,
3191 * add room for PRH. */
3192 skb = netdev_alloc_skb(bp->dev,
3193 pkt_len + 3);
3194 if (skb == NULL)
3195 {
3196 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3197 bp->rcv_discards++;
3198 break;
3199 }
3200 else {
3201 if (!rx_in_place) {
3202 /* Receive buffer allocated, pass receive packet up */
3203 dma_sync_single_for_cpu(
3204 bp->bus_dev,
3205 dma_addr +
3206 RCV_BUFF_K_PADDING,
3207 pkt_len + 3,
3208 DMA_FROM_DEVICE);
3209
3210 skb_copy_to_linear_data(skb,
3211 p_buff + RCV_BUFF_K_PADDING,
3212 pkt_len + 3);
3213 }
3214
3215 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
3216 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
3217 skb->protocol = fddi_type_trans(skb, bp->dev);
3218 bp->rcv_total_bytes += skb->len;
3219 netif_rx(skb);
3220
3221 /* Update the rcv counters */
3222 bp->rcv_total_frames++;
3223 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3224 bp->rcv_multicast_frames++;
3225 }
3226 }
3227 }
3228
3229 /*
3230 * Advance the producer (for recycling) and advance the completion
3231 * (for servicing received frames). Note that it is okay to
3232 * advance the producer without checking that it passes the
3233 * completion index because they are both advanced at the same
3234 * rate.
3235 */
3236
3237 bp->rcv_xmt_reg.index.rcv_prod += 1;
3238 bp->rcv_xmt_reg.index.rcv_comp += 1;
3239 }
3240 }
3241
3242
3243 /*
3244 * =====================
3245 * = dfx_xmt_queue_pkt =
3246 * =====================
3247 *
3248 * Overview:
3249 * Queues packets for transmission
3250 *
3251 * Returns:
3252 * Condition code
3253 *
3254 * Arguments:
3255 * skb - pointer to sk_buff to queue for transmission
3256 * dev - pointer to device information
3257 *
3258 * Functional Description:
3259 * Here we assume that an incoming skb transmit request
3260 * is contained in a single physically contiguous buffer
3261 * in which the virtual address of the start of packet
3262 * (skb->data) can be converted to a physical address
3263  *   by using dma_map_single().
3264 *
3265 * Since the adapter architecture requires a three byte
3266 * packet request header to prepend the start of packet,
3267 * we'll write the three byte field immediately prior to
3268 * the FC byte. This assumption is valid because we've
3269 * ensured that dev->hard_header_len includes three pad
3270 * bytes. By posting a single fragment to the adapter,
3271 * we'll reduce the number of descriptor fetches and
3272 * bus traffic needed to send the request.
3273 *
3274 * Also, we can't free the skb until after it's been DMA'd
3275 * out by the adapter, so we'll queue it in the driver and
3276 * return it in dfx_xmt_done.
3277 *
3278 * Return Codes:
3279  *   NETDEV_TX_OK - packet was queued, or dropped (link unavailable or bad skbuff)
3280  *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3281 *
3282 * Assumptions:
3283 * First and foremost, we assume the incoming skb pointer
3284 * is NOT NULL and is pointing to a valid sk_buff structure.
3285 *
3286 * The outgoing packet is complete, starting with the
3287 * frame control byte including the last byte of data,
3288 * but NOT including the 4 byte CRC. We'll let the
3289 * adapter hardware generate and append the CRC.
3290 *
3291 * The entire packet is stored in one physically
3292 * contiguous buffer which is not cached and whose
3293 * 32-bit physical address can be determined.
3294 *
3295 * It's vital that this routine is NOT reentered for the
3296 * same board and that the OS is not in another section of
3297 * code (eg. dfx_int_common) for the same board on a
3298 * different thread.
3299 *
3300 * Side Effects:
3301 * None
3302 */
3303
3304 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3305 struct net_device *dev)
3306 {
3307 DFX_board_t *bp = netdev_priv(dev);
3308 u8 prod; /* local transmit producer index */
3309 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3310 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3311 dma_addr_t dma_addr;
3312 unsigned long flags;
3313
3314 netif_stop_queue(dev);
3315
3316 /*
3317 * Verify that incoming transmit request is OK
3318 *
3319 * Note: The packet size check is consistent with other
3320 * Linux device drivers, although the correct packet
3321 * size should be verified before calling the
3322 * transmit routine.
3323 */
3324
3325 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3326 {
3327 printk("%s: Invalid packet length - %u bytes\n",
3328 dev->name, skb->len);
3329 bp->xmt_length_errors++; /* bump error counter */
3330 netif_wake_queue(dev);
3331 dev_kfree_skb(skb);
3332 return NETDEV_TX_OK; /* return "success" */
3333 }
3334 /*
3335 * See if adapter link is available, if not, free buffer
3336 *
3337	 * Note: If the link isn't available, free the buffer and return
3338	 *	 NETDEV_TX_OK rather than tell the upper layer to requeue
3339	 *	 the packet.  The reasoning is that by the time the link
3340	 *	 becomes available, the packet to be sent will be fairly
3341	 *	 stale.  By simply dropping the packet, the higher layer
3342	 *	 protocols will eventually time out waiting for response
3343	 *	 packets that they will never receive.
3344 */
3345
3346 if (bp->link_available == PI_K_FALSE)
3347 {
3348 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
3349 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
3350 else
3351 {
3352 bp->xmt_discards++; /* bump error counter */
3353 dev_kfree_skb(skb); /* free sk_buff now */
3354 netif_wake_queue(dev);
3355 return NETDEV_TX_OK; /* return "success" */
3356 }
3357 }
3358
3359 /* Write the three PRH bytes immediately before the FC byte */
3360
3361 skb_push(skb, 3);
3362 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
3363 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
3364 skb->data[2] = DFX_PRH2_BYTE; /* specification */
3365
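	/*
	 * Map the frame, including the three-byte PRH just prepended, for
	 * DMA to the adapter; on a mapping failure undo the skb_push() and
	 * return NETDEV_TX_BUSY so the stack requeues the packet.
	 */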
3366 dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
3367 DMA_TO_DEVICE);
3368 if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3369 skb_pull(skb, 3);
3370 return NETDEV_TX_BUSY;
3371 }
3372
3373 spin_lock_irqsave(&bp->lock, flags);
3374
3375 /* Get the current producer and the next free xmt data descriptor */
3376
3377 prod = bp->rcv_xmt_reg.index.xmt_prod;
3378 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3379
3380 /*
3381 * Get pointer to auxiliary queue entry to contain information
3382 * for this packet.
3383 *
3384 * Note: The current xmt producer index will become the
3385 * current xmt completion index when we complete this
3386 * packet later on. So, we'll get the pointer to the
3387 * next auxiliary queue entry now before we bump the
3388 * producer index.
3389 */
3390
3391 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
3392
3393 /*
3394 * Write the descriptor with buffer info and bump producer
3395 *
3396 * Note: Since we need to start DMA from the packet request
3397 * header, we'll add 3 bytes to the DMA buffer length,
3398 * and we'll determine the physical address of the
3399 * buffer from the PRH, not skb->data.
3400 *
3401 * Assumptions:
3402 * 1. Packet starts with the frame control (FC) byte
3403 * at skb->data.
3404 * 2. The 4-byte CRC is not appended to the buffer or
3405 * included in the length.
3406 * 3. Packet length (skb->len) is from FC to end of
3407 * data, inclusive.
3408 * 4. The packet length does not exceed the maximum
3409 * FDDI LLC frame length of 4491 bytes.
3410 * 5. The entire packet is contained in a physically
3411 * contiguous, non-cached, locked memory space
3412 * comprised of a single buffer pointed to by
3413 * skb->data.
3414	 *		6. The physical address of the start of packet
3415	 *		   can be determined from the virtual address
3416	 *		   by using dma_map_single() and is only 32 bits
3417	 *		   wide.  (A buffer-layout sketch follows this routine.)
3418 */
3419
3420 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3421 p_xmt_descr->long_1 = (u32)dma_addr;
3422
3423 /*
3424 * Verify that descriptor is actually available
3425 *
3426	 * Note: If a descriptor isn't available, return NETDEV_TX_BUSY,
3427	 *	 which tells the upper layer to requeue the packet for
3428	 *	 later transmission.
3429 *
3430 * We need to ensure that the producer never reaches the
3431 * completion, except to indicate that the queue is empty.
3432 */
3433
3434 if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3435 {
3436 skb_pull(skb,3);
3437 spin_unlock_irqrestore(&bp->lock, flags);
3438 return NETDEV_TX_BUSY; /* requeue packet for later */
3439 }
3440
3441 /*
3442 * Save info for this packet for xmt done indication routine
3443 *
3444 * Normally, we'd save the producer index in the p_xmt_drv_descr
3445 * structure so that we'd have it handy when we complete this
3446 * packet later (in dfx_xmt_done). However, since the current
3447 * transmit architecture guarantees a single fragment for the
3448 * entire packet, we can simply bump the completion index by
3449 * one (1) for each completed packet.
3450 *
3451 * Note: If this assumption changes and we're presented with
3452 * an inconsistent number of transmit fragments for packet
3453 * data, we'll need to modify this code to save the current
3454 * transmit producer index.
3455 */
3456
3457 p_xmt_drv_descr->p_skb = skb;
3458
3459 /* Update Type 2 register */
3460
3461 bp->rcv_xmt_reg.index.xmt_prod = prod;
3462 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3463 spin_unlock_irqrestore(&bp->lock, flags);
3464 netif_wake_queue(dev);
3465 return NETDEV_TX_OK; /* packet queued to adapter */
3466 }
3467
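/*
 * Illustrative sketch (not part of the original driver): after the
 * skb_push(skb, 3) in dfx_xmt_queue_pkt() the single DMA fragment handed
 * to the adapter is laid out as
 *
 *	| PRH0 | PRH1 | PRH2 | FC | rest of the frame ... |   (no CRC)
 *
 * and long_0 of the transmit descriptor packs the start/end-of-packet
 * flags together with the fragment length, mirroring the write in that
 * routine.  dfx_xmt_descr_long_0() is a hypothetical helper shown only
 * for illustration.
 */
#if 0	/* example only, never compiled */
static u32 dfx_xmt_descr_long_0(unsigned int frag_len)
{
	return (u32)(PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP |
		     (frag_len << PI_XMT_DESCR_V_SEG_LEN));
}
#endif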
3468
3469 /*
3470 * ================
3471 * = dfx_xmt_done =
3472 * ================
3473 *
3474 * Overview:
3475 * Processes all frames that have been transmitted.
3476 *
3477 * Returns:
3478	 *	The number of transmit buffers freed
3479 *
3480 * Arguments:
3481 * bp - pointer to board information
3482 *
3483 * Functional Description:
3484 * For all consumed transmit descriptors that have not
3485 * yet been completed, we'll free the skb we were holding
3486 * onto using dev_kfree_skb and bump the appropriate
3487 * counters.
3488 *
3489 * Return Codes:
3490 * None
3491 *
3492 * Assumptions:
3493 * The Type 2 register is not updated in this routine. It is
3494 * assumed that it will be updated in the ISR when dfx_xmt_done
3495 * returns.
3496 *
3497 * Side Effects:
3498 * None
3499 */
3500
3501	static int dfx_xmt_done(DFX_board_t *bp)
3502 {
3503 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3504 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3505 u8 comp; /* local transmit completion index */
3506 int freed = 0; /* buffers freed */
3507
3508 /* Service all consumed transmit frames */
3509
3510 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3511 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3512 {
3513 /* Get pointer to the transmit driver descriptor block information */
3514
3515 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3516
3517 /* Increment transmit counters */
3518
3519 bp->xmt_total_frames++;
3520 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3521
3522 /* Return skb to operating system */
3523 comp = bp->rcv_xmt_reg.index.xmt_comp;
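		/*
		 * long_1 of the completed descriptor still holds the DMA
		 * address programmed in dfx_xmt_queue_pkt(), so use it to
		 * unmap the buffer before handing the skb back.
		 */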
3524 dma_unmap_single(bp->bus_dev,
3525 bp->descr_block_virt->xmt_data[comp].long_1,
3526 p_xmt_drv_descr->p_skb->len,
3527 DMA_TO_DEVICE);
3528 dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3529
3530 /*
3531 * Move to start of next packet by updating completion index
3532 *
3533 * Here we assume that a transmit packet request is always
3534 * serviced by posting one fragment. We can therefore
3535 * simplify the completion code by incrementing the
3536 * completion index by one. This code will need to be
3537 * modified if this assumption changes. See comments
3538 * in dfx_xmt_queue_pkt for more details.
3539 */
3540
3541 bp->rcv_xmt_reg.index.xmt_comp += 1;
3542 freed++;
3543 }
3544 return freed;
3545 }
3546
3547
3548 /*
3549 * =================
3550 * = dfx_rcv_flush =
3551 * =================
3552 *
3553 * Overview:
3554	 *	Remove all skbs from the receive ring.
3555 *
3556 * Returns:
3557 * None
3558 *
3559 * Arguments:
3560 * bp - pointer to board information
3561 *
3562 * Functional Description:
3563	 *	Frees all the dynamically allocated skbs that are
3564 * currently attached to the device receive ring. This
3565 * function is typically only used when the device is
3566 * initialized or reinitialized.
3567 *
3568 * Return Codes:
3569 * None
3570 *
3571 * Side Effects:
3572 * None
3573 */
3574 #ifdef DYNAMIC_BUFFERS
3575	static void dfx_rcv_flush( DFX_board_t *bp )
3576 {
3577 int i, j;
3578
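	/*
	 * Visit every receive descriptor slot exactly once: the outer loop
	 * picks an offset within a group of rcv_bufs_to_post buffers and
	 * the inner loop strides through the ring one group at a time.
	 */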
3579 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3580 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3581 {
3582 struct sk_buff *skb;
3583 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3584 if (skb) {
3585 dma_unmap_single(bp->bus_dev,
3586 bp->descr_block_virt->rcv_data[i+j].long_1,
3587 PI_RCV_DATA_K_SIZE_MAX,
3588 DMA_FROM_DEVICE);
3589 dev_kfree_skb(skb);
3590 }
3591 bp->p_rcv_buff_va[i+j] = NULL;
3592 }
3593
3594 }
3595 #endif /* DYNAMIC_BUFFERS */
3596
3597 /*
3598 * =================
3599 * = dfx_xmt_flush =
3600 * =================
3601 *
3602 * Overview:
3603 * Processes all frames whether they've been transmitted
3604 * or not.
3605 *
3606 * Returns:
3607 * None
3608 *
3609 * Arguments:
3610 * bp - pointer to board information
3611 *
3612 * Functional Description:
3613 * For all produced transmit descriptors that have not
3614 * yet been completed, we'll free the skb we were holding
3615 * onto using dev_kfree_skb and bump the appropriate
3616 * counters. Of course, it's possible that some of
3617 * these transmit requests actually did go out, but we
3618 * won't make that distinction here. Finally, we'll
3619 * update the consumer index to match the producer.
3620 *
3621 * Return Codes:
3622 * None
3623 *
3624 * Assumptions:
3625 * This routine does NOT update the Type 2 register. It
3626 * is assumed that this routine is being called during a
3627 * transmit flush interrupt, or a shutdown or close routine.
3628 *
3629 * Side Effects:
3630 * None
3631 */
3632
3633	static void dfx_xmt_flush( DFX_board_t *bp )
3634 {
3635 u32 prod_cons; /* rcv/xmt consumer block longword */
3636 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3637 u8 comp; /* local transmit completion index */
3638
3639 /* Flush all outstanding transmit frames */
3640
3641 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3642 {
3643 /* Get pointer to the transmit driver descriptor block information */
3644
3645 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3646
3647 /* Return skb to operating system */
3648 comp = bp->rcv_xmt_reg.index.xmt_comp;
3649 dma_unmap_single(bp->bus_dev,
3650 bp->descr_block_virt->xmt_data[comp].long_1,
3651 p_xmt_drv_descr->p_skb->len,
3652 DMA_TO_DEVICE);
3653 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3654
3655 /* Increment transmit error counter */
3656
3657 bp->xmt_discards++;
3658
3659 /*
3660 * Move to start of next packet by updating completion index
3661 *
3662 * Here we assume that a transmit packet request is always
3663 * serviced by posting one fragment. We can therefore
3664 * simplify the completion code by incrementing the
3665 * completion index by one. This code will need to be
3666 * modified if this assumption changes. See comments
3667 * in dfx_xmt_queue_pkt for more details.
3668 */
3669
3670 bp->rcv_xmt_reg.index.xmt_comp += 1;
3671 }
3672
3673 /* Update the transmit consumer index in the consumer block */
3674
3675 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3676 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3677 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3678 }
3679
3680 /*
3681 * ==================
3682 * = dfx_unregister =
3683 * ==================
3684 *
3685 * Overview:
3686 * Shuts down an FDDI controller
3687 *
3688 * Returns:
3689	 *	None
3690 *
3691 * Arguments:
3692 * bdev - pointer to device information
3693 *
3694 * Functional Description:
3695	 *	Undoes dfx_register(): unregisters the net device and releases all resources claimed at probe time.
3696 * Return Codes:
3697 * None
3698 *
3699 * Assumptions:
3700 * It compiles so it should work :-( (PCI cards do :-)
3701 *
3702 * Side Effects:
3703 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3704 * freed.
3705 */
3706	static void dfx_unregister(struct device *bdev)
3707 {
3708 struct net_device *dev = dev_get_drvdata(bdev);
3709 DFX_board_t *bp = netdev_priv(dev);
3710 int dfx_bus_pci = dev_is_pci(bdev);
3711 int dfx_bus_tc = DFX_BUS_TC(bdev);
3712 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3713	resource_size_t bar_start[3] = {0};	/* resource start addresses */
3714 resource_size_t bar_len[3] = {0}; /* resource lengths */
3715 int alloc_size; /* total buffer size used */
3716
3717 unregister_netdev(dev);
3718
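	/*
	 * Recompute the size of the shared descriptor/consumer block the
	 * same way it was sized when allocated during driver initialization,
	 * so that dma_free_coherent() releases the whole region.
	 */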
3719 alloc_size = sizeof(PI_DESCR_BLOCK) +
3720 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3721 #ifndef DYNAMIC_BUFFERS
3722 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3723 #endif
3724 sizeof(PI_CONSUMER_BLOCK) +
3725 (PI_ALIGN_K_DESC_BLK - 1);
3726 if (bp->kmalloced)
3727 dma_free_coherent(bdev, alloc_size,
3728 bp->kmalloced, bp->kmalloced_dma);
3729
3730 dfx_bus_uninit(dev);
3731
3732 dfx_get_bars(bdev, bar_start, bar_len);
3733 if (bar_start[2] != 0)
3734 release_region(bar_start[2], bar_len[2]);
3735 if (bar_start[1] != 0)
3736 release_region(bar_start[1], bar_len[1]);
3737 if (dfx_use_mmio) {
3738 iounmap(bp->base.mem);
3739 release_mem_region(bar_start[0], bar_len[0]);
3740 } else
3741 release_region(bar_start[0], bar_len[0]);
3742
3743 if (dfx_bus_pci)
3744 pci_disable_device(to_pci_dev(bdev));
3745
3746 free_netdev(dev);
3747 }
3748
3749
3750 static int __maybe_unused dfx_dev_register(struct device *);
3751 static int __maybe_unused dfx_dev_unregister(struct device *);
3752
3753 #ifdef CONFIG_PCI
3754 static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3755 static void dfx_pci_unregister(struct pci_dev *);
3756
3757 static const struct pci_device_id dfx_pci_table[] = {
3758 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3759 { }
3760 };
3761 MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3762
3763 static struct pci_driver dfx_pci_driver = {
3764 .name = "defxx",
3765 .id_table = dfx_pci_table,
3766 .probe = dfx_pci_register,
3767 .remove = dfx_pci_unregister,
3768 };
3769
3770	static int dfx_pci_register(struct pci_dev *pdev,
3771 const struct pci_device_id *ent)
3772 {
3773 return dfx_register(&pdev->dev);
3774 }
3775
3776	static void dfx_pci_unregister(struct pci_dev *pdev)
3777 {
3778 dfx_unregister(&pdev->dev);
3779 }
3780 #endif /* CONFIG_PCI */
3781
3782 #ifdef CONFIG_EISA
3783 static const struct eisa_device_id dfx_eisa_table[] = {
3784 { "DEC3001", DEFEA_PROD_ID_1 },
3785 { "DEC3002", DEFEA_PROD_ID_2 },
3786 { "DEC3003", DEFEA_PROD_ID_3 },
3787 { "DEC3004", DEFEA_PROD_ID_4 },
3788 { }
3789 };
3790 MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3791
3792 static struct eisa_driver dfx_eisa_driver = {
3793 .id_table = dfx_eisa_table,
3794 .driver = {
3795 .name = "defxx",
3796 .bus = &eisa_bus_type,
3797 .probe = dfx_dev_register,
3798 .remove = dfx_dev_unregister,
3799 },
3800 };
3801 #endif /* CONFIG_EISA */
3802
3803 #ifdef CONFIG_TC
3804 static struct tc_device_id const dfx_tc_table[] = {
3805 { "DEC ", "PMAF-FA " },
3806 { "DEC ", "PMAF-FD " },
3807 { "DEC ", "PMAF-FS " },
3808 { "DEC ", "PMAF-FU " },
3809 { }
3810 };
3811 MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3812
3813 static struct tc_driver dfx_tc_driver = {
3814 .id_table = dfx_tc_table,
3815 .driver = {
3816 .name = "defxx",
3817 .bus = &tc_bus_type,
3818 .probe = dfx_dev_register,
3819 .remove = dfx_dev_unregister,
3820 },
3821 };
3822 #endif /* CONFIG_TC */
3823
3824	static int __maybe_unused dfx_dev_register(struct device *dev)
3825 {
3826 int status;
3827
3828 status = dfx_register(dev);
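	/*
	 * Hold a reference on the bus device while the board is registered;
	 * it is dropped again in dfx_dev_unregister().
	 */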
3829 if (!status)
3830 get_device(dev);
3831 return status;
3832 }
3833
3834	static int __maybe_unused dfx_dev_unregister(struct device *dev)
3835 {
3836 put_device(dev);
3837 dfx_unregister(dev);
3838 return 0;
3839 }
3840
3841
3842	static int dfx_init(void)
3843 {
3844 int status;
3845
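	/*
	 * Register the bus drivers in PCI, EISA, TURBOchannel order; on any
	 * failure, unregister whatever was already registered, in reverse
	 * order.
	 */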
3846 status = pci_register_driver(&dfx_pci_driver);
3847 if (status)
3848 goto err_pci_register;
3849
3850 status = eisa_driver_register(&dfx_eisa_driver);
3851 if (status)
3852 goto err_eisa_register;
3853
3854 status = tc_register_driver(&dfx_tc_driver);
3855 if (status)
3856 goto err_tc_register;
3857
3858 return 0;
3859
3860 err_tc_register:
3861 eisa_driver_unregister(&dfx_eisa_driver);
3862 err_eisa_register:
3863 pci_unregister_driver(&dfx_pci_driver);
3864 err_pci_register:
3865 return status;
3866 }
3867
3868	static void dfx_cleanup(void)
3869 {
3870 tc_unregister_driver(&dfx_tc_driver);
3871 eisa_driver_unregister(&dfx_eisa_driver);
3872 pci_unregister_driver(&dfx_pci_driver);
3873 }
3874
3875 module_init(dfx_init);
3876 module_exit(dfx_cleanup);
3877 MODULE_AUTHOR("Lawrence V. Stefani");
3878 MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3879 DRV_VERSION " " DRV_RELDATE);
3880 MODULE_LICENSE("GPL");
3881