
Lines Matching +full:wait +full:- +full:state

3  * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2012 Solarflare Communications Inc.
29 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
31 * - The PREEMPT_RT patches mostly deal with this, but also allow a
41 * The self-test should stress every RSS vector, and unfortunately
63 [EFX_INT_MODE_MSIX] = "MSI-X",
68 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
71 * efx_loopback_state - persistent state during a loopback selftest
90 /* How long to wait for all the packets to arrive (in ms) */

in efx_test_phy_alive():
103 if (efx->phy_op->test_alive) {
104 rc = efx->phy_op->test_alive(efx);
105 tests->phy_alive = rc ? -1 : 1;

in efx_test_nvram():
115 if (efx->type->test_nvram) {
116 rc = efx->type->test_nvram(efx);
117 if (rc == -EPERM)
120 tests->nvram = rc ? -1 : 1;
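
Both helpers above record results using the driver's convention of 1 for pass, -1 for fail, and 0 for a test that was skipped or never ran, with -EPERM from the hook treated as "skipped" rather than "failed". A minimal sketch of that convention, assuming an invented record_result() helper and result slot:

#include <errno.h>

/* Result-slot convention: 1 = passed, -1 = failed, 0 = not run / not supported.
 * A hook returning -EPERM means the test was refused, which is treated as
 * "skipped" rather than as an overall selftest failure.
 */
static int record_result(int rc, int *slot)
{
        if (rc == -EPERM)
                return 0;       /* leave *slot at 0, do not fail the selftest */
        *slot = rc ? -1 : 1;
        return rc;
}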

in efx_test_interrupts():
136 unsigned long timeout, wait;
140 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
141 tests->interrupt = -1;
144 if (rc == -ENOTSUPP) {
145 netif_dbg(efx, drv, efx->net_dev,
147 tests->interrupt = 0;
152 wait = 1;
154 /* Wait for arrival of test interrupt. */
155 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
157 schedule_timeout_uninterruptible(wait);
161 wait *= 2;
164 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
165 return -ETIMEDOUT;
168 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
170 tests->interrupt = 1;
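
efx_test_interrupts() fires a test interrupt, then sleeps in a loop with a doubling interval until the interrupt is observed or the timeout expires. A minimal userspace sketch of that wait pattern, assuming an invented interrupt_seen() check and an arbitrary one-second budget:

#include <stdbool.h>
#include <time.h>

static bool interrupt_seen(void)
{
        return false;   /* stub; the driver checks which CPU handled the IRQ */
}

static int wait_for_test_interrupt(void)
{
        long wait_ms = 1, waited_ms = 0;

        while (waited_ms < 1000) {
                if (interrupt_seen())
                        return 0;
                struct timespec ts = { wait_ms / 1000, (wait_ms % 1000) * 1000000L };
                nanosleep(&ts, NULL);
                waited_ms += wait_ms;
                wait_ms *= 2;           /* back off, like "wait *= 2" above */
        }
        return -1;                      /* the driver returns -ETIMEDOUT */
}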

in efx_test_eventq_irq():
181 unsigned long timeout, wait;
186 read_ptr[channel->channel] = channel->eventq_read_ptr;
187 set_bit(channel->channel, &dma_pend);
188 set_bit(channel->channel, &int_pend);
193 wait = 1;
195 /* Wait for arrival of interrupts. NAPI processing may or may
199 schedule_timeout_uninterruptible(wait);
203 if (channel->eventq_read_ptr !=
204 read_ptr[channel->channel]) {
205 set_bit(channel->channel, &napi_ran);
206 clear_bit(channel->channel, &dma_pend);
207 clear_bit(channel->channel, &int_pend);
210 clear_bit(channel->channel, &dma_pend);
212 clear_bit(channel->channel, &int_pend);
217 wait *= 2;
221 bool dma_seen = !test_bit(channel->channel, &dma_pend);
222 bool int_seen = !test_bit(channel->channel, &int_pend);
224 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
225 tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
228 netif_dbg(efx, drv, efx->net_dev,
230 channel->channel,
231 test_bit(channel->channel, &napi_ran) ?
237 netif_err(efx, drv, efx->net_dev,
239 channel->channel);
241 netif_err(efx, drv, efx->net_dev,
244 channel->channel);
246 netif_err(efx, drv, efx->net_dev,
249 channel->channel);
253 return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
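
The event-queue test tracks outstanding work with one bit per channel: dma_pend and int_pend start with every tested channel's bit set, and bits are cleared as the corresponding DMA event and interrupt are seen. A rough userspace equivalent of that bookkeeping, with an invented channel count and helper names:

#include <stdbool.h>
#include <stdio.h>

static unsigned long dma_pend, int_pend;

static void mark_channel_pending(int ch)
{
        dma_pend |= 1UL << ch;
        int_pend |= 1UL << ch;
}

static void channel_event_seen(int ch, bool dma, bool irq)
{
        if (dma)
                dma_pend &= ~(1UL << ch);
        if (irq)
                int_pend &= ~(1UL << ch);
}

int main(void)
{
        for (int ch = 0; ch < 4; ch++)
                mark_channel_pending(ch);

        channel_event_seen(1, true, true);      /* pretend channel 1 completed */

        for (int ch = 0; ch < 4; ch++)
                printf("channel %d: dma %s, irq %s\n", ch,
                       (dma_pend >> ch) & 1 ? "pending" : "seen",
                       (int_pend >> ch) & 1 ? "pending" : "seen");
        return 0;
}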

in efx_test_phy():
261 if (!efx->phy_op->run_tests)
264 mutex_lock(&efx->mac_lock);
265 rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
266 mutex_unlock(&efx->mac_lock);
267 if (rc == -EPERM)
270 netif_info(efx, drv, efx->net_dev,

in efx_loopback_rx_packet():
289 struct efx_loopback_state *state = efx->loopback_selftest;
296 if ((state == NULL) || state->flush)
299 payload = &state->payload;
302 received->ip.saddr = payload->ip.saddr;
303 if (state->offload_csum)
304 received->ip.check = payload->ip.check;
307 if (pkt_len < sizeof(received->header)) {
308 netif_err(efx, drv, efx->net_dev,
315 if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
316 netif_err(efx, drv, efx->net_dev,
317 "saw non-loopback RX packet in %s loopback test\n",
324 netif_err(efx, drv, efx->net_dev,
332 if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
333 netif_err(efx, drv, efx->net_dev,
340 if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
341 netif_err(efx, drv, efx->net_dev,
348 if (received->iteration != payload->iteration) {
349 netif_err(efx, drv, efx->net_dev,
351 "%s loopback test\n", ntohs(received->iteration),
352 ntohs(payload->iteration), LOOPBACK_MODE(efx));
357 netif_vdbg(efx, drv, efx->net_dev,
360 atomic_inc(&state->rx_good);
365 if (atomic_read(&state->rx_bad) == 0) {
366 netif_err(efx, drv, efx->net_dev, "received packet:\n");
369 netif_err(efx, drv, efx->net_dev, "expected packet:\n");
371 &state->payload, sizeof(state->payload), 0);
374 atomic_inc(&state->rx_bad);
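
Each received packet is compared field by field against the saved reference payload, and only the good/bad counters are updated; a mismatch also dumps both packets once for debugging. A compressed sketch of that comparison, using an invented simplified payload layout rather than the driver's struct efx_loopback_payload:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct fake_payload {                   /* illustrative stand-in */
        unsigned char header[14];       /* Ethernet header */
        unsigned char ip[20];           /* IPv4 header */
        uint16_t iteration;             /* which test iteration sent it */
        char msg[64];                   /* fixed message body */
};

static atomic_int rx_good, rx_bad;

static void check_rx_packet(const struct fake_payload *received,
                            const struct fake_payload *expected)
{
        if (memcmp(received->header, expected->header, sizeof(expected->header)) ||
            memcmp(received->ip, expected->ip, sizeof(expected->ip)) ||
            received->iteration != expected->iteration ||
            memcmp(received->msg, expected->msg, sizeof(received->msg))) {
                atomic_fetch_add(&rx_bad, 1);   /* count it and keep receiving */
                return;
        }
        atomic_fetch_add(&rx_good, 1);
}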

in efx_iterate_state():
380 struct efx_loopback_state *state = efx->loopback_selftest;
381 struct net_device *net_dev = efx->net_dev;
382 struct efx_loopback_payload *payload = &state->payload;
385 ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
386 ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
387 payload->header.h_proto = htons(ETH_P_IP);
390 payload->ip.daddr = htonl(INADDR_LOOPBACK);
391 payload->ip.ihl = 5;
392 payload->ip.check = (__force __sum16) htons(0xdead);
393 payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
394 payload->ip.version = IPVERSION;
395 payload->ip.protocol = IPPROTO_UDP;
398 payload->udp.source = 0;
399 payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
401 payload->udp.check = 0; /* checksum ignored */
404 payload->iteration = htons(ntohs(payload->iteration) + 1);
405 memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
407 /* Fill out remaining state members */
408 atomic_set(&state->rx_good, 0);
409 atomic_set(&state->rx_bad, 0);
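
efx_iterate_state() rebuilds the reference packet for each pass: an Ethernet header, a minimal IPv4 header carrying a deliberately bogus checksum, a UDP header with its checksum disabled, an incrementing iteration tag, and a fixed message. A hedged userspace sketch of the same construction using the standard netinet headers; the struct layout, message text and helper name are illustrative only:

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <stdint.h>
#include <string.h>

struct loopback_payload {               /* simplified stand-in, no Ethernet header */
        struct iphdr  ip;
        struct udphdr udp;
        uint16_t      iteration;
        char          msg[64];
} __attribute__((packed));

static void build_payload(struct loopback_payload *p, uint16_t iteration)
{
        memset(p, 0, sizeof(*p));

        p->ip.ihl      = 5;
        p->ip.version  = 4;                     /* IPVERSION */
        p->ip.tot_len  = htons(sizeof(*p));
        p->ip.protocol = IPPROTO_UDP;
        p->ip.daddr    = htonl(INADDR_LOOPBACK);
        p->ip.check    = htons(0xdead);         /* intentionally bogus, as above */

        p->udp.len     = htons(sizeof(*p) - sizeof(p->ip));
        p->udp.check   = 0;                     /* checksum ignored */

        p->iteration   = htons(iteration);
        strncpy(p->msg, "loopback selftest payload", sizeof(p->msg) - 1);
}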

in efx_begin_loopback():
415 struct efx_nic *efx = tx_queue->efx;
416 struct efx_loopback_state *state = efx->loopback_selftest;
423 for (i = 0; i < state->packet_count; i++) {
426 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
428 return -ENOMEM;
429 state->skbs[i] = skb;
434 payload = skb_put(skb, sizeof(state->payload));
435 memcpy(payload, &state->payload, sizeof(state->payload));
436 payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
442 netif_tx_lock_bh(efx->net_dev);
444 netif_tx_unlock_bh(efx->net_dev);
447 netif_err(efx, drv, efx->net_dev,
449 "%d in %s loopback test\n", tx_queue->queue,
450 i + 1, state->packet_count,
455 return -EPIPE;
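
efx_begin_loopback() allocates one skb per test packet, copies the reference payload into each, and then overwrites the IP source address with a value derived from the packet index so that a lost or duplicated packet can be identified later. A userspace sketch of stamping otherwise-identical buffers with a per-index tag; the packet struct and allocation strategy are invented:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct test_pkt {
        uint32_t saddr;         /* per-packet tag, like ip.saddr in the driver */
        char     body[60];
};

static struct test_pkt **make_packets(const struct test_pkt *template,
                                      unsigned int count)
{
        struct test_pkt **pkts = calloc(count, sizeof(*pkts));

        if (!pkts)
                return NULL;
        for (unsigned int i = 0; i < count; i++) {
                pkts[i] = malloc(sizeof(*pkts[i]));
                if (!pkts[i])
                        return NULL;    /* error unwinding omitted for brevity */
                memcpy(pkts[i], template, sizeof(*template));
                /* Unique tag per packet, mirroring INADDR_LOOPBACK | (i << 2). */
                pkts[i]->saddr = htonl(INADDR_LOOPBACK | (i << 2));
        }
        return pkts;
}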

in efx_poll_loopback():
464 struct efx_loopback_state *state = efx->loopback_selftest;
466 return atomic_read(&state->rx_good) == state->packet_count;

in efx_end_loopback():
472 struct efx_nic *efx = tx_queue->efx;
473 struct efx_loopback_state *state = efx->loopback_selftest;
478 netif_tx_lock_bh(efx->net_dev);
482 for (i = 0; i < state->packet_count; i++) {
483 skb = state->skbs[i];
489 netif_tx_unlock_bh(efx->net_dev);
492 rx_good = atomic_read(&state->rx_good);
493 rx_bad = atomic_read(&state->rx_bad);
494 if (tx_done != state->packet_count) {
498 netif_err(efx, drv, efx->net_dev,
501 tx_queue->queue, tx_done, state->packet_count,
503 rc = -ETIMEDOUT;
508 if (rx_good != state->packet_count) {
509 netif_dbg(efx, drv, efx->net_dev,
512 tx_queue->queue, rx_good, state->packet_count,
514 rc = -ETIMEDOUT;
519 lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
520 lb_tests->tx_done[tx_queue->queue] += tx_done;
521 lb_tests->rx_good += rx_good;
522 lb_tests->rx_bad += rx_bad;
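
efx_end_loopback() frees any skbs that were never completed, then compares the TX-completion and RX-good counts against the number of packets it queued and folds everything into the ethtool loopback results. A minimal sketch of that pass/fail decision, with an invented counters struct:

#include <errno.h>

struct loopback_counts {
        unsigned int sent;      /* packets queued for transmission */
        unsigned int tx_done;   /* TX completions observed */
        unsigned int rx_good;   /* packets that came back and verified OK */
};

static int check_loopback_counts(const struct loopback_counts *c)
{
        if (c->tx_done != c->sent)      /* something never left the NIC */
                return -ETIMEDOUT;
        if (c->rx_good != c->sent)      /* something never came back intact */
                return -ETIMEDOUT;
        return 0;
}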

in efx_test_loopback():
531 struct efx_nic *efx = tx_queue->efx;
532 struct efx_loopback_state *state = efx->loopback_selftest;
537 state->packet_count = efx->txq_entries / 3;
538 state->packet_count = min(1 << (i << 2), state->packet_count);
539 state->skbs = kcalloc(state->packet_count,
540 sizeof(state->skbs[0]), GFP_KERNEL);
541 if (!state->skbs)
542 return -ENOMEM;
543 state->flush = false;
545 netif_dbg(efx, drv, efx->net_dev,
547 tx_queue->queue, LOOPBACK_MODE(efx),
548 state->packet_count);
554 * prepared to wait much longer. */
562 kfree(state->skbs);
565 /* Wait a while to ensure there are no packets
572 netif_dbg(efx, drv, efx->net_dev,
574 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
575 state->packet_count);
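
efx_test_loopback() ramps the packet count geometrically across attempts (1 << (i << 2), i.e. 1, 16, 256, ...) while capping it at a third of the TX ring so the queue cannot be overfilled. A quick illustration of that sizing rule with an arbitrary ring size and attempt count:

#include <stdio.h>

int main(void)
{
        unsigned int txq_entries = 1024;                /* example ring size */

        for (unsigned int i = 0; i < 3; i++) {
                unsigned int cap = txq_entries / 3;     /* never fill the ring */
                unsigned int ramp = 1u << (i << 2);     /* 1, 16, 256, ... */
                unsigned int count = ramp < cap ? ramp : cap;

                printf("attempt %u: %u packets\n", i, count);
        }
        return 0;
}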

580 /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
582 * to delay and retry. Therefore, it's safer to just poll directly. Wait

in efx_wait_for_link():
586 struct efx_link_state *link_state = &efx->link_state;
593 if (efx->type->monitor != NULL) {
594 mutex_lock(&efx->mac_lock);
595 efx->type->monitor(efx);
596 mutex_unlock(&efx->mac_lock);
599 mutex_lock(&efx->mac_lock);
600 link_up = link_state->up;
602 link_up = !efx->type->check_mac_fault(efx);
603 mutex_unlock(&efx->mac_lock);
613 return -ETIMEDOUT;
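
efx_wait_for_link() polls the link state directly under the MAC lock, nudging the NIC's monitor hook by hand when one exists, and returns -ETIMEDOUT once its polling budget runs out. A generic poll-until-deadline sketch in the same spirit; link_is_up(), the interval, and the poll count are placeholders:

#include <stdbool.h>
#include <time.h>

static bool link_is_up(void)
{
        return false;   /* placeholder; a real check would query the MAC/PHY */
}

static int wait_for_link(int max_polls)
{
        const struct timespec interval = { 0, 100 * 1000 * 1000 };  /* 100 ms */

        for (int i = 0; i < max_polls; i++) {
                if (link_is_up())
                        return 0;
                nanosleep(&interval, NULL);
        }
        return -1;              /* the driver returns -ETIMEDOUT here */
}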

in efx_test_loopbacks():
620 struct efx_loopback_state *state;
622 efx_get_channel(efx, efx->tx_channel_offset);
627 * all received packets will be dropped. Mark the state as
629 state = kzalloc(sizeof(*state), GFP_KERNEL);
630 if (state == NULL)
631 return -ENOMEM;
632 BUG_ON(efx->loopback_selftest);
633 state->flush = true;
634 efx->loopback_selftest = state;
642 state->flush = true;
643 mutex_lock(&efx->mac_lock);
644 efx->loopback_mode = mode;
646 mutex_unlock(&efx->mac_lock);
648 netif_err(efx, drv, efx->net_dev,
656 netif_err(efx, drv, efx->net_dev,
664 state->offload_csum = (tx_queue->queue &
667 &tests->loopback[mode]);
675 state->flush = true;
676 efx->loopback_selftest = NULL;
678 kfree(state);
680 if (rc == -EPERM)

in efx_selftest():
695 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
696 int phy_mode = efx->phy_mode;
701 /* Online (i.e. non-disruptive) testing
734 if (efx->type->test_chip) {
735 rc_reset = efx->type->test_chip(efx, tests);
737 netif_err(efx, hw, efx->net_dev,
743 if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
744 rc_test = -EIO;
749 mutex_lock(&efx->mac_lock);
750 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
751 efx->loopback_mode = LOOPBACK_NONE;
753 mutex_unlock(&efx->mac_lock);
759 rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
763 /* restore the PHY to the previous state */
764 mutex_lock(&efx->mac_lock);
765 efx->phy_mode = phy_mode;
766 efx->loopback_mode = loopback_mode;
768 mutex_unlock(&efx->mac_lock);
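
For the offline phase, efx_selftest() remembers the current PHY and loopback modes, forces a known configuration for the disruptive chip and loopback tests, and restores the saved modes under the MAC lock when it is done, whether or not the tests passed. A generic save-and-restore-around-test sketch with invented types and names:

struct test_config {
        int phy_mode;
        int loopback_mode;
};

static int run_disruptive_test(struct test_config *cfg, int (*test)(void))
{
        struct test_config saved = *cfg;        /* remember the user's settings */
        int rc;

        cfg->phy_mode = 0;                      /* e.g. clear low-power mode */
        cfg->loopback_mode = 0;                 /* e.g. LOOPBACK_NONE */

        rc = test();

        *cfg = saved;                           /* always restore, pass or fail */
        return rc;
}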

in efx_selftest_async_start():
781 schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);

in efx_selftest_async_cancel():
786 cancel_delayed_work_sync(&efx->selftest_work);

in efx_selftest_async_work():
799 netif_err(efx, ifup, efx->net_dev,
801 channel->channel);
803 netif_dbg(efx, ifup, efx->net_dev,
805 channel->channel, cpu);