Lines Matching +full:pci +full:- +full:host1

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2006-2013 Solarflare Communications Inc.
11 #include <linux/pci.h>
26 /* Falcon-architecture (SFC9000-family) support */
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
77 (_tx_queue)->queue)
90 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, in efx_write_buf_tbl()
97 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || in efx_masked_compare_oword()
98 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); in efx_masked_compare_oword()
148 netif_err(efx, hw, efx->net_dev, in efx_farch_test_registers()
152 return -EIO; in efx_farch_test_registers()
178 EFX_WARN_ON_PARANOID(!buffer->buf.addr); in efx_init_special_buffer()
181 for (i = 0; i < buffer->entries; i++) { in efx_init_special_buffer()
182 index = buffer->index + i; in efx_init_special_buffer()
183 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); in efx_init_special_buffer()
184 netif_dbg(efx, probe, efx->net_dev, in efx_init_special_buffer()
200 unsigned int start = buffer->index; in efx_fini_special_buffer()
201 unsigned int end = (buffer->index + buffer->entries - 1); in efx_fini_special_buffer()
203 if (!buffer->entries) in efx_fini_special_buffer()
206 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", in efx_fini_special_buffer()
207 buffer->index, buffer->index + buffer->entries - 1); in efx_fini_special_buffer()
231 struct siena_nic_data *nic_data = efx->nic_data; in efx_alloc_special_buffer()
235 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) in efx_alloc_special_buffer()
236 return -ENOMEM; in efx_alloc_special_buffer()
237 buffer->entries = len / EFX_BUF_SIZE; in efx_alloc_special_buffer()
238 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); in efx_alloc_special_buffer()
241 buffer->index = efx->next_buffer_table; in efx_alloc_special_buffer()
242 efx->next_buffer_table += buffer->entries; in efx_alloc_special_buffer()
245 nic_data->vf_buftbl_base < efx->next_buffer_table); in efx_alloc_special_buffer()
248 netif_dbg(efx, probe, efx->net_dev, in efx_alloc_special_buffer()
249 "allocating special buffers %d-%d at %llx+%x " in efx_alloc_special_buffer()
250 "(virt %p phys %llx)\n", buffer->index, in efx_alloc_special_buffer()
251 buffer->index + buffer->entries - 1, in efx_alloc_special_buffer()
252 (u64)buffer->buf.dma_addr, len, in efx_alloc_special_buffer()
253 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); in efx_alloc_special_buffer()
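The allocation above claims a contiguous run of buffer-table entries by bumping efx->next_buffer_table; entries are never handed back in the fragments shown. A standalone sketch of that bump allocation, with names simplified and an EFX_BUF_SIZE of 4096 assumed from the alignment check above:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EFX_BUF_SIZE 4096u	/* one buffer-table entry covers 4KB */

struct special_buffer {
	uint64_t dma_addr;	/* must be EFX_BUF_SIZE-aligned */
	unsigned int index;	/* first buffer-table entry claimed */
	unsigned int entries;	/* number of entries spanned */
};

static unsigned int next_buffer_table;	/* mirrors efx->next_buffer_table */

static void claim_buftbl_entries(struct special_buffer *b, size_t len)
{
	/* Hardware walks whole entries, so the base must be aligned
	 * and len a multiple of EFX_BUF_SIZE. */
	assert((b->dma_addr & (EFX_BUF_SIZE - 1)) == 0);
	b->entries = len / EFX_BUF_SIZE;
	b->index = next_buffer_table;	 /* select a new buffer set */
	next_buffer_table += b->entries; /* bump; no recycling shown */
}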
261 if (!buffer->buf.addr) in efx_free_special_buffer()
264 netif_dbg(efx, hw, efx->net_dev, in efx_free_special_buffer()
265 "deallocating special buffers %d-%d at %llx+%x " in efx_free_special_buffer()
266 "(virt %p phys %llx)\n", buffer->index, in efx_free_special_buffer()
267 buffer->index + buffer->entries - 1, in efx_free_special_buffer()
268 (u64)buffer->buf.dma_addr, buffer->buf.len, in efx_free_special_buffer()
269 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); in efx_free_special_buffer()
271 efx_nic_free_buffer(efx, &buffer->buf); in efx_free_special_buffer()
272 buffer->entries = 0; in efx_free_special_buffer()
287 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in efx_farch_notify_tx_desc()
289 efx_writed_page(tx_queue->efx, &reg, in efx_farch_notify_tx_desc()
290 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); in efx_farch_notify_tx_desc()
303 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in efx_farch_push_tx_desc()
307 efx_writeo_page(tx_queue->efx, &reg, in efx_farch_push_tx_desc()
308 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); in efx_farch_push_tx_desc()
321 unsigned old_write_count = tx_queue->write_count; in efx_farch_tx_write()
323 tx_queue->xmit_pending = false; in efx_farch_tx_write()
324 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) in efx_farch_tx_write()
328 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in efx_farch_tx_write()
329 buffer = &tx_queue->buffer[write_ptr]; in efx_farch_tx_write()
331 ++tx_queue->write_count; in efx_farch_tx_write()
333 EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); in efx_farch_tx_write()
339 buffer->flags & EFX_TX_BUF_CONT, in efx_farch_tx_write()
340 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, in efx_farch_tx_write()
342 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); in efx_farch_tx_write()
343 } while (tx_queue->write_count != tx_queue->insert_count); in efx_farch_tx_write()
349 old_write_count & tx_queue->ptr_mask); in efx_farch_tx_write()
351 ++tx_queue->pushes; in efx_farch_tx_write()
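write_count and insert_count above are free-running counters that are masked into the ring only when a slot is touched; with a power-of-two ring, ptr_mask is size - 1 and the fill level is a plain subtraction. A minimal standalone illustration of the pattern:

#include <stdio.h>

#define RING_SIZE 512u			/* must be a power of two */
#define PTR_MASK (RING_SIZE - 1)

int main(void)
{
	/* Free-running counters; they wrap naturally at UINT_MAX. */
	unsigned int insert_count = 1000, write_count = 997;

	while (write_count != insert_count) {
		unsigned int write_ptr = write_count & PTR_MASK;

		printf("fill descriptor slot %u\n", write_ptr);
		++write_count;
	}
	/* Occupancy needs no separate wrap flag: */
	printf("in flight: %u\n", insert_count - write_count);
	return 0;
}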
361 unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; in efx_farch_tx_limit_len()
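The limit computed above is the number of bytes from dma_addr to the end of its EFX_PAGE_SIZE page, so a single descriptor never spans a page boundary. A worked example, assuming a 4096-byte EFX_PAGE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define EFX_PAGE_SIZE 4096u	/* assumed page size */

static unsigned int bytes_to_page_end(uint64_t dma_addr)
{
	/* ~dma_addr & (EFX_PAGE_SIZE - 1) counts the bytes strictly
	 * above dma_addr within its page; +1 includes dma_addr. */
	return (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
}

int main(void)
{
	printf("%u\n", bytes_to_page_end(0x1000)); /* 4096: page start */
	printf("%u\n", bytes_to_page_end(0x1ffc)); /* 4: near page end */
	return 0;
}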
372 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_probe()
375 tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) | in efx_farch_tx_probe()
376 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0); in efx_farch_tx_probe()
377 entries = tx_queue->ptr_mask + 1; in efx_farch_tx_probe()
378 return efx_alloc_special_buffer(efx, &tx_queue->txd, in efx_farch_tx_probe()
384 int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; in efx_farch_tx_init()
385 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_init()
389 efx_init_special_buffer(efx, &tx_queue->txd); in efx_farch_tx_init()
396 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, in efx_farch_tx_init()
398 tx_queue->channel->channel, in efx_farch_tx_init()
400 FRF_AZ_TX_DESCQ_LABEL, tx_queue->label, in efx_farch_tx_init()
402 __ffs(tx_queue->txd.entries), in efx_farch_tx_init()
409 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, in efx_farch_tx_init()
410 tx_queue->queue); in efx_farch_tx_init()
414 (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? in efx_farch_tx_init()
417 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue); in efx_farch_tx_init()
419 tx_queue->tso_version = 1; in efx_farch_tx_init()
424 struct efx_nic *efx = tx_queue->efx; in efx_farch_flush_tx_queue()
427 WARN_ON(atomic_read(&tx_queue->flush_outstanding)); in efx_farch_flush_tx_queue()
428 atomic_set(&tx_queue->flush_outstanding, 1); in efx_farch_flush_tx_queue()
432 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); in efx_farch_flush_tx_queue()
438 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_fini()
443 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, in efx_farch_tx_fini()
444 tx_queue->queue); in efx_farch_tx_fini()
447 efx_fini_special_buffer(efx, &tx_queue->txd); in efx_farch_tx_fini()
453 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); in efx_farch_tx_remove()
473 rx_buf->len - in efx_farch_build_rx_desc()
474 rx_queue->efx->type->rx_buffer_padding, in efx_farch_build_rx_desc()
476 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); in efx_farch_build_rx_desc()
484 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_write()
488 while (rx_queue->notified_count != rx_queue->added_count) { in efx_farch_rx_write()
491 rx_queue->notified_count & rx_queue->ptr_mask); in efx_farch_rx_write()
492 ++rx_queue->notified_count; in efx_farch_rx_write()
496 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; in efx_farch_rx_write()
504 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_probe()
507 entries = rx_queue->ptr_mask + 1; in efx_farch_rx_probe()
508 return efx_alloc_special_buffer(efx, &rx_queue->rxd, in efx_farch_rx_probe()
515 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_init()
518 /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */ in efx_farch_rx_init()
519 jumbo_en = efx->rx_scatter; in efx_farch_rx_init()
521 netif_dbg(efx, hw, efx->net_dev, in efx_farch_rx_init()
522 "RX queue %d ring in special buffers %d-%d\n", in efx_farch_rx_init()
523 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, in efx_farch_rx_init()
524 rx_queue->rxd.index + rx_queue->rxd.entries - 1); in efx_farch_rx_init()
526 rx_queue->scatter_n = 0; in efx_farch_rx_init()
529 efx_init_special_buffer(efx, &rx_queue->rxd); in efx_farch_rx_init()
535 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, in efx_farch_rx_init()
537 efx_rx_queue_channel(rx_queue)->channel, in efx_farch_rx_init()
542 __ffs(rx_queue->rxd.entries), in efx_farch_rx_init()
546 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in efx_farch_rx_init()
552 struct efx_nic *efx = rx_queue->efx; in efx_farch_flush_rx_queue()
565 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_fini()
569 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in efx_farch_rx_fini()
573 efx_fini_special_buffer(efx, &rx_queue->rxd); in efx_farch_rx_fini()
579 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); in efx_farch_rx_remove()
596 return (atomic_read(&efx->active_queues) == 0 || in efx_farch_flush_wake()
597 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT in efx_farch_flush_wake()
598 && atomic_read(&efx->rxq_flush_pending) > 0)); in efx_farch_flush_wake()
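The waiter above makes progress in two cases: every queue has drained, or there is room below the batched-flush limit to issue another pending RX flush. Extracted as a standalone predicate (an EFX_RX_FLUSH_COUNT of 4 matches the "limit of 4" cited in the FLR comment later in this file):

#include <stdbool.h>

#define EFX_RX_FLUSH_COUNT 4	/* batched RX flush limit */

static bool flush_wake(int active_queues, int rxq_flush_outstanding,
		       int rxq_flush_pending)
{
	return active_queues == 0 ||
	       (rxq_flush_outstanding < EFX_RX_FLUSH_COUNT &&
		rxq_flush_pending > 0);
}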
611 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); in efx_check_tx_flush_complete()
616 netif_dbg(efx, hw, efx->net_dev, in efx_check_tx_flush_complete()
618 tx_queue->queue); in efx_check_tx_flush_complete()
620 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, in efx_check_tx_flush_complete()
625 netif_dbg(efx, hw, efx->net_dev, in efx_check_tx_flush_complete()
627 "the queue\n", tx_queue->queue); in efx_check_tx_flush_complete()
658 rx_queue->flush_pending = true; in efx_farch_do_flush()
659 atomic_inc(&efx->rxq_flush_pending); in efx_farch_do_flush()
663 while (timeout && atomic_read(&efx->active_queues) > 0) { in efx_farch_do_flush()
680 if (atomic_read(&efx->rxq_flush_outstanding) >= in efx_farch_do_flush()
684 if (rx_queue->flush_pending) { in efx_farch_do_flush()
685 rx_queue->flush_pending = false; in efx_farch_do_flush()
686 atomic_dec(&efx->rxq_flush_pending); in efx_farch_do_flush()
687 atomic_inc(&efx->rxq_flush_outstanding); in efx_farch_do_flush()
694 timeout = wait_event_timeout(efx->flush_wq, in efx_farch_do_flush()
699 if (atomic_read(&efx->active_queues) && in efx_farch_do_flush()
701 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " in efx_farch_do_flush()
702 "(rx %d+%d)\n", atomic_read(&efx->active_queues), in efx_farch_do_flush()
703 atomic_read(&efx->rxq_flush_outstanding), in efx_farch_do_flush()
704 atomic_read(&efx->rxq_flush_pending)); in efx_farch_do_flush()
705 rc = -ETIMEDOUT; in efx_farch_do_flush()
707 atomic_set(&efx->active_queues, 0); in efx_farch_do_flush()
708 atomic_set(&efx->rxq_flush_pending, 0); in efx_farch_do_flush()
709 atomic_set(&efx->rxq_flush_outstanding, 0); in efx_farch_do_flush()
723 if (efx->state != STATE_RECOVERY) { in efx_farch_fini_dmaq()
725 if (efx->pci_dev->is_busmaster) { in efx_farch_fini_dmaq()
726 efx->type->prepare_flush(efx); in efx_farch_fini_dmaq()
728 efx->type->finish_flush(efx); in efx_farch_fini_dmaq()
746 * completion events. This means that efx->rxq_flush_outstanding remained at 4
747 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
750 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
751 * for batched flush requests; and the efx->active_queues gets messed up because
758 atomic_set(&efx->rxq_flush_pending, 0); in efx_farch_finish_flr()
759 atomic_set(&efx->rxq_flush_outstanding, 0); in efx_farch_finish_flr()
760 atomic_set(&efx->active_queues, 0); in efx_farch_finish_flr()
767 * Event queues are processed by per-channel tasklets.
779 struct efx_nic *efx = channel->efx; in efx_farch_ev_read_ack()
782 channel->eventq_read_ptr & channel->eventq_mask); in efx_farch_ev_read_ack()
788 efx->type->evq_rptr_tbl_base + in efx_farch_ev_read_ack()
789 FR_BZ_EVQ_RPTR_STEP * channel->channel); in efx_farch_ev_read_ack()
800 drv_ev_reg.u32[0] = event->u32[0]; in efx_farch_generate_event()
801 drv_ev_reg.u32[1] = event->u32[1]; in efx_farch_generate_event()
815 efx_farch_generate_event(channel->efx, channel->channel, &event); in efx_farch_magic_event()
829 struct efx_nic *efx = channel->efx; in efx_farch_handle_tx_event()
831 if (unlikely(READ_ONCE(efx->reset_pending))) in efx_farch_handle_tx_event()
838 tx_queue = channel->tx_queue + in efx_farch_handle_tx_event()
844 tx_queue = channel->tx_queue + in efx_farch_handle_tx_event()
847 netif_tx_lock(efx->net_dev); in efx_farch_handle_tx_event()
849 netif_tx_unlock(efx->net_dev); in efx_farch_handle_tx_event()
853 netif_err(efx, tx_err, efx->net_dev, in efx_farch_handle_tx_event()
855 EFX_QWORD_FMT"\n", channel->channel, in efx_farch_handle_tx_event()
865 struct efx_nic *efx = rx_queue->efx; in efx_farch_handle_rx_not_ok()
888 * checksum errors during self-test. */ in efx_farch_handle_rx_not_ok()
890 ++channel->n_rx_frm_trunc; in efx_farch_handle_rx_not_ok()
892 ++channel->n_rx_tobe_disc; in efx_farch_handle_rx_not_ok()
893 else if (!efx->loopback_selftest) { in efx_farch_handle_rx_not_ok()
895 ++channel->n_rx_ip_hdr_chksum_err; in efx_farch_handle_rx_not_ok()
897 ++channel->n_rx_tcp_udp_chksum_err; in efx_farch_handle_rx_not_ok()
906 netif_dbg(efx, rx_err, efx->net_dev, in efx_farch_handle_rx_not_ok()
924 if (efx->net_dev->features & NETIF_F_RXALL) in efx_farch_handle_rx_not_ok()
934 /* Handle receive events that are not in-order. Return true if this
942 struct efx_nic *efx = rx_queue->efx; in efx_farch_handle_rx_bad_index()
945 if (rx_queue->scatter_n && in efx_farch_handle_rx_bad_index()
946 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & in efx_farch_handle_rx_bad_index()
947 rx_queue->ptr_mask)) { in efx_farch_handle_rx_bad_index()
948 ++channel->n_rx_nodesc_trunc; in efx_farch_handle_rx_bad_index()
952 expected = rx_queue->removed_count & rx_queue->ptr_mask; in efx_farch_handle_rx_bad_index()
953 dropped = (index - expected) & rx_queue->ptr_mask; in efx_farch_handle_rx_bad_index()
954 netif_info(efx, rx_err, efx->net_dev, in efx_farch_handle_rx_bad_index()
967 * discard non-matching multicast packets.
978 struct efx_nic *efx = channel->efx; in efx_farch_handle_rx_event()
980 if (unlikely(READ_ONCE(efx->reset_pending))) in efx_farch_handle_rx_event()
986 channel->channel); in efx_farch_handle_rx_event()
991 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & in efx_farch_handle_rx_event()
992 rx_queue->ptr_mask); in efx_farch_handle_rx_event()
996 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { in efx_farch_handle_rx_event()
1002 if (rx_queue->scatter_n) { in efx_farch_handle_rx_event()
1005 rx_queue->removed_count & rx_queue->ptr_mask, in efx_farch_handle_rx_event()
1006 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); in efx_farch_handle_rx_event()
1007 rx_queue->removed_count += rx_queue->scatter_n; in efx_farch_handle_rx_event()
1008 rx_queue->scatter_n = 0; in efx_farch_handle_rx_event()
1019 rx_queue->removed_count & rx_queue->ptr_mask, in efx_farch_handle_rx_event()
1021 ++rx_queue->removed_count; in efx_farch_handle_rx_event()
1026 ++rx_queue->scatter_n; in efx_farch_handle_rx_event()
1061 ++channel->n_rx_mcast_mismatch; in efx_farch_handle_rx_event()
1066 channel->irq_mod_score += 2; in efx_farch_handle_rx_event()
1070 rx_queue->removed_count & rx_queue->ptr_mask, in efx_farch_handle_rx_event()
1071 rx_queue->scatter_n, rx_ev_byte_cnt, flags); in efx_farch_handle_rx_event()
1072 rx_queue->removed_count += rx_queue->scatter_n; in efx_farch_handle_rx_event()
1073 rx_queue->scatter_n = 0; in efx_farch_handle_rx_event()
1088 if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { in efx_farch_handle_tx_flush_done()
1090 tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); in efx_farch_handle_tx_flush_done()
1091 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) in efx_farch_handle_tx_flush_done()
1092 efx_farch_magic_event(tx_queue->channel, in efx_farch_handle_tx_flush_done()
1111 if (qid >= efx->n_channels) in efx_farch_handle_rx_flush_done()
1119 netif_info(efx, hw, efx->net_dev, in efx_farch_handle_rx_flush_done()
1121 rx_queue->flush_pending = true; in efx_farch_handle_rx_flush_done()
1122 atomic_inc(&efx->rxq_flush_pending); in efx_farch_handle_rx_flush_done()
1127 atomic_dec(&efx->rxq_flush_outstanding); in efx_farch_handle_rx_flush_done()
1129 wake_up(&efx->flush_wq); in efx_farch_handle_rx_flush_done()
1135 struct efx_nic *efx = channel->efx; in efx_farch_handle_drain_event()
1137 WARN_ON(atomic_read(&efx->active_queues) == 0); in efx_farch_handle_drain_event()
1138 atomic_dec(&efx->active_queues); in efx_farch_handle_drain_event()
1140 wake_up(&efx->flush_wq); in efx_farch_handle_drain_event()
1146 struct efx_nic *efx = channel->efx; in efx_farch_handle_generated_event()
1156 channel->event_test_cpu = raw_smp_processor_id(); in efx_farch_handle_generated_event()
1167 netif_dbg(efx, hw, efx->net_dev, "channel %d received " in efx_farch_handle_generated_event()
1169 channel->channel, EFX_QWORD_VAL(*event)); in efx_farch_handle_generated_event()
1176 struct efx_nic *efx = channel->efx; in efx_farch_handle_driver_event()
1185 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", in efx_farch_handle_driver_event()
1186 channel->channel, ev_sub_data); in efx_farch_handle_driver_event()
1193 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", in efx_farch_handle_driver_event()
1194 channel->channel, ev_sub_data); in efx_farch_handle_driver_event()
1201 netif_dbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1203 channel->channel, ev_sub_data); in efx_farch_handle_driver_event()
1206 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1207 "channel %d SRAM update done\n", channel->channel); in efx_farch_handle_driver_event()
1210 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1212 channel->channel, ev_sub_data); in efx_farch_handle_driver_event()
1215 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1217 channel->channel, ev_sub_data); in efx_farch_handle_driver_event()
1220 netif_err(efx, rx_err, efx->net_dev, in efx_farch_handle_driver_event()
1222 "Resetting.\n", channel->channel); in efx_farch_handle_driver_event()
1223 atomic_inc(&efx->rx_reset); in efx_farch_handle_driver_event()
1228 netif_err(efx, rx_err, efx->net_dev, in efx_farch_handle_driver_event()
1241 netif_err(efx, tx_err, efx->net_dev, in efx_farch_handle_driver_event()
1253 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1255 "data %04x\n", channel->channel, ev_sub_code, in efx_farch_handle_driver_event()
1263 struct efx_nic *efx = channel->efx; in efx_farch_ev_process()
1272 read_ptr = channel->eventq_read_ptr; in efx_farch_ev_process()
1282 netif_vdbg(channel->efx, intr, channel->efx->net_dev, in efx_farch_ev_process()
1284 channel->channel, EFX_QWORD_VAL(event)); in efx_farch_ev_process()
1317 if (efx->type->handle_global_event && in efx_farch_ev_process()
1318 efx->type->handle_global_event(channel, &event)) in efx_farch_ev_process()
1322 netif_err(channel->efx, hw, channel->efx->net_dev, in efx_farch_ev_process()
1324 EFX_QWORD_FMT ")\n", channel->channel, in efx_farch_ev_process()
1330 channel->eventq_read_ptr = read_ptr; in efx_farch_ev_process()
1337 struct efx_nic *efx = channel->efx; in efx_farch_ev_probe()
1340 entries = channel->eventq_mask + 1; in efx_farch_ev_probe()
1341 return efx_alloc_special_buffer(efx, &channel->eventq, in efx_farch_ev_probe()
1348 struct efx_nic *efx = channel->efx; in efx_farch_ev_init()
1350 netif_dbg(efx, hw, efx->net_dev, in efx_farch_ev_init()
1351 "channel %d event queue in special buffers %d-%d\n", in efx_farch_ev_init()
1352 channel->channel, channel->eventq.index, in efx_farch_ev_init()
1353 channel->eventq.index + channel->eventq.entries - 1); in efx_farch_ev_init()
1359 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); in efx_farch_ev_init()
1362 efx_init_special_buffer(efx, &channel->eventq); in efx_farch_ev_init()
1365 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); in efx_farch_ev_init()
1370 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), in efx_farch_ev_init()
1371 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); in efx_farch_ev_init()
1372 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in efx_farch_ev_init()
1373 channel->channel); in efx_farch_ev_init()
1381 struct efx_nic *efx = channel->efx; in efx_farch_ev_fini()
1385 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in efx_farch_ev_fini()
1386 channel->channel); in efx_farch_ev_fini()
1387 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); in efx_farch_ev_fini()
1390 efx_fini_special_buffer(efx, &channel->eventq); in efx_farch_ev_fini()
1396 efx_free_special_buffer(channel->efx, &channel->eventq); in efx_farch_ev_remove()
1415 * queue processing is carried out by per-channel tasklets.
1426 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, in efx_farch_interrupts()
1434 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); in efx_farch_irq_enable_master()
1461 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_fatal_interrupt()
1468 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " in efx_farch_fatal_interrupt()
1479 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1485 pci_clear_master(efx->pci_dev); in efx_farch_fatal_interrupt()
1489 if (efx->int_error_count == 0 || in efx_farch_fatal_interrupt()
1490 time_after(jiffies, efx->int_error_expire)) { in efx_farch_fatal_interrupt()
1491 efx->int_error_count = 0; in efx_farch_fatal_interrupt()
1492 efx->int_error_expire = in efx_farch_fatal_interrupt()
1495 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { in efx_farch_fatal_interrupt()
1496 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1497 "SYSTEM ERROR - reset scheduled\n"); in efx_farch_fatal_interrupt()
1500 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1501 "SYSTEM ERROR - max number of errors seen." in efx_farch_fatal_interrupt()
1515 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); in efx_farch_legacy_interrupt()
1516 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_legacy_interrupt()
1532 !efx->eeh_disabled_legacy_irq) { in efx_farch_legacy_interrupt()
1533 disable_irq_nosync(efx->legacy_irq); in efx_farch_legacy_interrupt()
1534 efx->eeh_disabled_legacy_irq = true; in efx_farch_legacy_interrupt()
1537 /* Handle non-event-queue sources */ in efx_farch_legacy_interrupt()
1538 if (queues & (1U << efx->irq_level) && soft_enabled) { in efx_farch_legacy_interrupt()
1542 efx->last_irq_cpu = raw_smp_processor_id(); in efx_farch_legacy_interrupt()
1546 efx->irq_zero_count = 0; in efx_farch_legacy_interrupt()
1565 if (efx->irq_zero_count++ == 0) in efx_farch_legacy_interrupt()
1572 channel->eventq_read_ptr); in efx_farch_legacy_interrupt()
1582 netif_vdbg(efx, intr, efx->net_dev, in efx_farch_legacy_interrupt()
1599 struct efx_nic *efx = context->efx; in efx_farch_msi_interrupt()
1600 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_msi_interrupt()
1603 netif_vdbg(efx, intr, efx->net_dev, in efx_farch_msi_interrupt()
1607 if (!likely(READ_ONCE(efx->irq_soft_enabled))) in efx_farch_msi_interrupt()
1610 /* Handle non-event-queue sources */ in efx_farch_msi_interrupt()
1611 if (context->index == efx->irq_level) { in efx_farch_msi_interrupt()
1615 efx->last_irq_cpu = raw_smp_processor_id(); in efx_farch_msi_interrupt()
1619 efx_schedule_channel_irq(efx->channel[context->index]); in efx_farch_msi_interrupt()
1632 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != in efx_farch_rx_push_indir_table()
1637 efx->rss_context.rx_indir_table[i]); in efx_farch_rx_push_indir_table()
1649 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != in efx_farch_rx_pull_indir_table()
1656 efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); in efx_farch_rx_pull_indir_table()
1665 * efx->vf_buftbl_base buftbl entries for SR-IOV
1666 * efx->rx_dc_base RX descriptor caches
1667 * efx->tx_dc_base TX descriptor caches
1674 struct siena_nic_data *nic_data = efx->nic_data; in efx_farch_dimension_resources()
1677 total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; in efx_farch_dimension_resources()
1681 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + in efx_farch_dimension_resources()
1683 efx->n_channels * EFX_MAX_EVQ_SIZE) in efx_farch_dimension_resources()
1685 vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); in efx_farch_dimension_resources()
1688 if (efx->type->sriov_wanted) { in efx_farch_dimension_resources()
1689 if (efx->type->sriov_wanted(efx)) { in efx_farch_dimension_resources()
1693 nic_data->vf_buftbl_base = buftbl_min; in efx_farch_dimension_resources()
1697 buftbl_free = (sram_lim_qw - buftbl_min - in efx_farch_dimension_resources()
1704 (1024U - EFX_VI_BASE) >> efx->vi_scale); in efx_farch_dimension_resources()
1706 if (efx->vf_count > vf_limit) { in efx_farch_dimension_resources()
1707 netif_err(efx, probe, efx->net_dev, in efx_farch_dimension_resources()
1709 efx->vf_count, vf_limit); in efx_farch_dimension_resources()
1710 efx->vf_count = vf_limit; in efx_farch_dimension_resources()
1712 vi_count += efx->vf_count * efx_vf_size(efx); in efx_farch_dimension_resources()
1717 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; in efx_farch_dimension_resources()
1718 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; in efx_farch_dimension_resources()
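The two assignments above carve the descriptor caches downwards from the top of SRAM, one TX and one RX cache per virtual interface, leaving everything below rx_dc_base for buffer-table entries. A sketch of the carve-up; the TX_DC_ENTRIES/RX_DC_ENTRIES values and the SRAM limit here are illustrative assumptions:

#include <stdio.h>

#define TX_DC_ENTRIES 16	/* illustrative per-VI TX cache size */
#define RX_DC_ENTRIES 64	/* illustrative per-VI RX cache size */

int main(void)
{
	unsigned int sram_lim_qw = 0x20000;	/* assumed SRAM limit, qwords */
	unsigned int vi_count = 32;

	unsigned int tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	unsigned int rx_dc_base = tx_dc_base - vi_count * RX_DC_ENTRIES;

	/* [0, rx_dc_base) remains available for buffer-table entries. */
	printf("tx_dc_base %#x rx_dc_base %#x\n", tx_dc_base, rx_dc_base);
	return 0;
}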
1733 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); in efx_farch_init_common()
1735 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); in efx_farch_init_common()
1743 /* Set RX descriptor cache size. Set low watermark to size-8, as in efx_farch_init_common()
1749 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); in efx_farch_init_common()
1756 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); in efx_farch_init_common()
1761 efx->irq_level = 0x1f; in efx_farch_init_common()
1763 /* Use a valid MSI-X vector */ in efx_farch_init_common()
1764 efx->irq_level = 0; in efx_farch_init_common()
1780 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be in efx_farch_init_common()
1789 /* Enable SW_EV to inherit in char driver - assume harmless here */ in efx_farch_init_common()
1817 /* "Fudge factors" - difference between programmed value and actual depth.
1824 /* Hard maximum search limit. Hardware will time-out beyond 200-something.
1831 * counter-productive. */
1889 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1890 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1909 return key * 2 - 1; in efx_farch_filter_increment()
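Per the comment above, the table key is hashed by running it through the LFSR x^16 + x^3 + 1 starting from state 0xffff, and efx_farch_filter_increment() then derives an odd probe increment from the key. A bit-serial reference for such an LFSR; the driver uses an equivalent folded form, and the MSB-first bit order here is an assumption:

#include <stdint.h>

/* LFSR polynomial x^16 + x^3 + 1, initial state 0xffff, clocking in
 * the 32 key bits MSB first (bit order is an assumption). */
static uint16_t lfsr_hash(uint32_t key)
{
	uint16_t state = 0xffff;
	int i;

	for (i = 31; i >= 0; i--) {
		unsigned int fb = ((state >> 15) & 1) ^ ((key >> i) & 1);

		state <<= 1;
		if (fb)
			state ^= 0x0009;	/* taps: x^3 and 1 */
	}
	return state;
}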
1929 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0); in efx_farch_filter_spec_table_id()
1934 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_push_rx_config()
1940 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; in efx_farch_filter_push_rx_config()
1942 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + in efx_farch_filter_push_rx_config()
1945 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + in efx_farch_filter_push_rx_config()
1948 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + in efx_farch_filter_push_rx_config()
1951 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + in efx_farch_filter_push_rx_config()
1954 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; in efx_farch_filter_push_rx_config()
1955 if (table->size) { in efx_farch_filter_push_rx_config()
1958 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + in efx_farch_filter_push_rx_config()
1962 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + in efx_farch_filter_push_rx_config()
1966 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; in efx_farch_filter_push_rx_config()
1967 if (table->size) { in efx_farch_filter_push_rx_config()
1970 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); in efx_farch_filter_push_rx_config()
1973 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & in efx_farch_filter_push_rx_config()
1977 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); in efx_farch_filter_push_rx_config()
1980 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & in efx_farch_filter_push_rx_config()
1989 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & in efx_farch_filter_push_rx_config()
1990 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & in efx_farch_filter_push_rx_config()
2000 efx->rx_scatter); in efx_farch_filter_push_rx_config()
2008 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_push_tx_limits()
2014 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; in efx_farch_filter_push_tx_limits()
2015 if (table->size) { in efx_farch_filter_push_tx_limits()
2018 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + in efx_farch_filter_push_tx_limits()
2022 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + in efx_farch_filter_push_tx_limits()
2035 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) in efx_farch_filter_from_gen_spec()
2036 return -EINVAL; in efx_farch_filter_from_gen_spec()
2038 spec->priority = gen_spec->priority; in efx_farch_filter_from_gen_spec()
2039 spec->flags = gen_spec->flags; in efx_farch_filter_from_gen_spec()
2040 spec->dmaq_id = gen_spec->dmaq_id; in efx_farch_filter_from_gen_spec()
2042 switch (gen_spec->match_flags) { in efx_farch_filter_from_gen_spec()
2050 __be32 rhost, host1, host2; in efx_farch_filter_from_gen_spec() local
2053 EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); in efx_farch_filter_from_gen_spec()
2055 if (gen_spec->ether_type != htons(ETH_P_IP)) in efx_farch_filter_from_gen_spec()
2056 return -EPROTONOSUPPORT; in efx_farch_filter_from_gen_spec()
2057 if (gen_spec->loc_port == 0 || in efx_farch_filter_from_gen_spec()
2058 (is_full && gen_spec->rem_port == 0)) in efx_farch_filter_from_gen_spec()
2059 return -EADDRNOTAVAIL; in efx_farch_filter_from_gen_spec()
2060 switch (gen_spec->ip_proto) { in efx_farch_filter_from_gen_spec()
2062 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : in efx_farch_filter_from_gen_spec()
2066 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : in efx_farch_filter_from_gen_spec()
2070 return -EPROTONOSUPPORT; in efx_farch_filter_from_gen_spec()
2078 rhost = is_full ? gen_spec->rem_host[0] : 0; in efx_farch_filter_from_gen_spec()
2079 rport = is_full ? gen_spec->rem_port : 0; in efx_farch_filter_from_gen_spec()
2080 host1 = rhost; in efx_farch_filter_from_gen_spec()
2081 host2 = gen_spec->loc_host[0]; in efx_farch_filter_from_gen_spec()
2082 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { in efx_farch_filter_from_gen_spec()
2083 port1 = gen_spec->loc_port; in efx_farch_filter_from_gen_spec()
2087 port2 = gen_spec->loc_port; in efx_farch_filter_from_gen_spec()
2089 spec->data[0] = ntohl(host1) << 16 | ntohs(port1); in efx_farch_filter_from_gen_spec()
2090 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; in efx_farch_filter_from_gen_spec()
2091 spec->data[2] = ntohl(host2); in efx_farch_filter_from_gen_spec()
2100 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : in efx_farch_filter_from_gen_spec()
2102 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; in efx_farch_filter_from_gen_spec()
2103 spec->data[1] = (gen_spec->loc_mac[2] << 24 | in efx_farch_filter_from_gen_spec()
2104 gen_spec->loc_mac[3] << 16 | in efx_farch_filter_from_gen_spec()
2105 gen_spec->loc_mac[4] << 8 | in efx_farch_filter_from_gen_spec()
2106 gen_spec->loc_mac[5]); in efx_farch_filter_from_gen_spec()
2107 spec->data[2] = (gen_spec->loc_mac[0] << 8 | in efx_farch_filter_from_gen_spec()
2108 gen_spec->loc_mac[1]); in efx_farch_filter_from_gen_spec()
2112 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? in efx_farch_filter_from_gen_spec()
2115 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ in efx_farch_filter_from_gen_spec()
2119 return -EPROTONOSUPPORT; in efx_farch_filter_from_gen_spec()
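The data[] packing above squeezes the IPv4 4-tuple into a 96-bit hardware key: port1 and the low half of host1 in data[0], port2 and the high half of host1 in data[1], host2 in data[2]. A standalone round-trip check in host byte order (the driver additionally applies ntohl/ntohs), matching the pack above and the unpack in efx_farch_filter_to_gen_spec() below:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host1 = 0xc0a80001, host2 = 0xc0a80002; /* 192.168.0.1/.2 */
	uint16_t port1 = 80, port2 = 12345;
	uint32_t data[3];

	/* Pack, as in efx_farch_filter_from_gen_spec(): */
	data[0] = host1 << 16 | port1;
	data[1] = (uint32_t)port2 << 16 | host1 >> 16;
	data[2] = host2;

	/* Unpack, as in efx_farch_filter_to_gen_spec(): */
	printf("host1 %08" PRIx32 "\n", data[0] >> 16 | data[1] << 16);
	printf("port1 %04x\n", (unsigned int)(uint16_t)data[0]);
	printf("host2 %08" PRIx32 "\n", data[2]);
	printf("port2 %04x\n", (unsigned int)(uint16_t)(data[1] >> 16));
	return 0;
}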
2137 gen_spec->priority = spec->priority; in efx_farch_filter_to_gen_spec()
2138 gen_spec->flags = spec->flags; in efx_farch_filter_to_gen_spec()
2139 gen_spec->dmaq_id = spec->dmaq_id; in efx_farch_filter_to_gen_spec()
2141 switch (spec->type) { in efx_farch_filter_to_gen_spec()
2148 __be32 host1, host2; in efx_farch_filter_to_gen_spec() local
2151 gen_spec->match_flags = in efx_farch_filter_to_gen_spec()
2156 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | in efx_farch_filter_to_gen_spec()
2158 gen_spec->ether_type = htons(ETH_P_IP); in efx_farch_filter_to_gen_spec()
2159 gen_spec->ip_proto = in efx_farch_filter_to_gen_spec()
2160 (spec->type == EFX_FARCH_FILTER_TCP_FULL || in efx_farch_filter_to_gen_spec()
2161 spec->type == EFX_FARCH_FILTER_TCP_WILD) ? in efx_farch_filter_to_gen_spec()
2164 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); in efx_farch_filter_to_gen_spec()
2165 port1 = htons(spec->data[0]); in efx_farch_filter_to_gen_spec()
2166 host2 = htonl(spec->data[2]); in efx_farch_filter_to_gen_spec()
2167 port2 = htons(spec->data[1] >> 16); in efx_farch_filter_to_gen_spec()
2168 if (spec->flags & EFX_FILTER_FLAG_TX) { in efx_farch_filter_to_gen_spec()
2169 gen_spec->loc_host[0] = host1; in efx_farch_filter_to_gen_spec()
2170 gen_spec->rem_host[0] = host2; in efx_farch_filter_to_gen_spec()
2172 gen_spec->loc_host[0] = host2; in efx_farch_filter_to_gen_spec()
2173 gen_spec->rem_host[0] = host1; in efx_farch_filter_to_gen_spec()
2175 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ in efx_farch_filter_to_gen_spec()
2176 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { in efx_farch_filter_to_gen_spec()
2177 gen_spec->loc_port = port1; in efx_farch_filter_to_gen_spec()
2178 gen_spec->rem_port = port2; in efx_farch_filter_to_gen_spec()
2180 gen_spec->loc_port = port2; in efx_farch_filter_to_gen_spec()
2181 gen_spec->rem_port = port1; in efx_farch_filter_to_gen_spec()
2191 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; in efx_farch_filter_to_gen_spec()
2193 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; in efx_farch_filter_to_gen_spec()
2194 gen_spec->loc_mac[0] = spec->data[2] >> 8; in efx_farch_filter_to_gen_spec()
2195 gen_spec->loc_mac[1] = spec->data[2]; in efx_farch_filter_to_gen_spec()
2196 gen_spec->loc_mac[2] = spec->data[1] >> 24; in efx_farch_filter_to_gen_spec()
2197 gen_spec->loc_mac[3] = spec->data[1] >> 16; in efx_farch_filter_to_gen_spec()
2198 gen_spec->loc_mac[4] = spec->data[1] >> 8; in efx_farch_filter_to_gen_spec()
2199 gen_spec->loc_mac[5] = spec->data[1]; in efx_farch_filter_to_gen_spec()
2200 gen_spec->outer_vid = htons(spec->data[0]); in efx_farch_filter_to_gen_spec()
2205 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; in efx_farch_filter_to_gen_spec()
2206 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; in efx_farch_filter_to_gen_spec()
2222 spec->priority = EFX_FILTER_PRI_AUTO; in efx_farch_filter_init_rx_auto()
2223 spec->flags = (EFX_FILTER_FLAG_RX | in efx_farch_filter_init_rx_auto()
2225 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); in efx_farch_filter_init_rx_auto()
2226 spec->dmaq_id = 0; in efx_farch_filter_init_rx_auto()
2229 /* Build a filter entry and return its n-tuple key. */
2237 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || in efx_farch_filter_build()
2238 spec->type == EFX_FARCH_FILTER_UDP_WILD); in efx_farch_filter_build()
2242 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), in efx_farch_filter_build()
2244 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), in efx_farch_filter_build()
2246 FRF_BZ_RXQ_ID, spec->dmaq_id, in efx_farch_filter_build()
2247 EFX_DWORD_2, spec->data[2], in efx_farch_filter_build()
2248 EFX_DWORD_1, spec->data[1], in efx_farch_filter_build()
2249 EFX_DWORD_0, spec->data[0]); in efx_farch_filter_build()
2255 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; in efx_farch_filter_build()
2259 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), in efx_farch_filter_build()
2261 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), in efx_farch_filter_build()
2262 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, in efx_farch_filter_build()
2264 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], in efx_farch_filter_build()
2265 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], in efx_farch_filter_build()
2266 FRF_CZ_RMFT_VLAN_ID, spec->data[0]); in efx_farch_filter_build()
2272 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; in efx_farch_filter_build()
2274 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, in efx_farch_filter_build()
2276 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], in efx_farch_filter_build()
2277 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], in efx_farch_filter_build()
2278 FRF_CZ_TMFT_VLAN_ID, spec->data[0]); in efx_farch_filter_build()
2279 data3 = is_wild | spec->dmaq_id << 1; in efx_farch_filter_build()
2287 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; in efx_farch_filter_build()
2293 if (left->type != right->type || in efx_farch_filter_equal()
2294 memcmp(left->data, right->data, sizeof(left->data))) in efx_farch_filter_equal()
2297 if (left->flags & EFX_FILTER_FLAG_TX && in efx_farch_filter_equal()
2298 left->dmaq_id != right->dmaq_id) in efx_farch_filter_equal()
2310 * accept user-provided IDs.
2337 #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2345 range = efx_farch_filter_type_match_pri[spec->type]; in efx_farch_filter_make_id()
2346 if (!(spec->flags & EFX_FILTER_FLAG_RX)) in efx_farch_filter_make_id()
2370 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_rx_id_limit()
2371 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; in efx_farch_filter_get_rx_id_limit()
2376 if (state->table[table_id].size != 0) in efx_farch_filter_get_rx_id_limit()
2378 state->table[table_id].size; in efx_farch_filter_get_rx_id_limit()
2379 } while (range--); in efx_farch_filter_get_rx_id_limit()
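The filter IDs handed back to callers pack a match-priority range into the high bits and the table index into the low EFX_FARCH_FILTER_INDEX_WIDTH bits, which is what lets the *_safe() entry points validate user-provided IDs cheaply. A sketch of the encode/decode pair; the width value of 13 is an assumption for illustration:

#define EFX_FARCH_FILTER_INDEX_WIDTH 13	/* assumed width */
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)

static inline unsigned int filter_make_id(unsigned int range,
					  unsigned int index)
{
	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline unsigned int filter_id_to_index(unsigned int id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}

static inline unsigned int filter_id_to_range(unsigned int id)
{
	return id >> EFX_FARCH_FILTER_INDEX_WIDTH;
}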
2388 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_insert()
2400 down_write(&state->lock); in efx_farch_filter_insert()
2402 table = &state->table[efx_farch_filter_spec_table_id(&spec)]; in efx_farch_filter_insert()
2403 if (table->size == 0) { in efx_farch_filter_insert()
2404 rc = -EINVAL; in efx_farch_filter_insert()
2408 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_filter_insert()
2410 table->search_limit[spec.type]); in efx_farch_filter_insert()
2412 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { in efx_farch_filter_insert()
2416 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); in efx_farch_filter_insert()
2417 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; in efx_farch_filter_insert()
2438 unsigned int max_rep_depth = table->search_limit[spec.type]; in efx_farch_filter_insert()
2443 unsigned int i = hash & (table->size - 1); in efx_farch_filter_insert()
2445 ins_index = -1; in efx_farch_filter_insert()
2449 if (!test_bit(i, table->used_bitmap)) { in efx_farch_filter_insert()
2453 &table->spec[i])) { in efx_farch_filter_insert()
2465 rc = -EBUSY; in efx_farch_filter_insert()
2468 rep_index = -1; in efx_farch_filter_insert()
2472 i = (i + incr) & (table->size - 1); in efx_farch_filter_insert()
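The loop above is double hashing on a power-of-two table: incr comes from efx_farch_filter_increment() (key * 2 - 1, always odd) and is therefore coprime with table->size, so the probe sequence visits every slot before repeating. A standalone illustration:

#include <stdio.h>

#define TABLE_SIZE 8u	/* power of two, small for illustration */

int main(void)
{
	unsigned int key = 0x1234;
	unsigned int hash = key;		/* stand-in for the LFSR hash */
	unsigned int incr = key * 2 - 1;	/* always odd */
	unsigned int i = hash & (TABLE_SIZE - 1);
	unsigned int depth;

	for (depth = 1; depth <= TABLE_SIZE; depth++) {
		printf("probe %u: slot %u\n", depth, i);
		i = (i + incr) & (TABLE_SIZE - 1);
	}
	return 0;
}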
2482 &table->spec[rep_index]; in efx_farch_filter_insert()
2484 if (spec.priority == saved_spec->priority && !replace_equal) { in efx_farch_filter_insert()
2485 rc = -EEXIST; in efx_farch_filter_insert()
2488 if (spec.priority < saved_spec->priority) { in efx_farch_filter_insert()
2489 rc = -EPERM; in efx_farch_filter_insert()
2492 if (saved_spec->priority == EFX_FILTER_PRI_AUTO || in efx_farch_filter_insert()
2493 saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) in efx_farch_filter_insert()
2499 __set_bit(ins_index, table->used_bitmap); in efx_farch_filter_insert()
2500 ++table->used; in efx_farch_filter_insert()
2502 table->spec[ins_index] = spec; in efx_farch_filter_insert()
2504 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { in efx_farch_filter_insert()
2507 if (table->search_limit[spec.type] < depth) { in efx_farch_filter_insert()
2508 table->search_limit[spec.type] = depth; in efx_farch_filter_insert()
2516 table->offset + table->step * ins_index); in efx_farch_filter_insert()
2526 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_filter_insert()
2532 up_write(&state->lock); in efx_farch_filter_insert()
2543 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); in efx_farch_filter_table_clear_entry()
2544 BUG_ON(table->offset == 0); /* can't clear MAC default filters */ in efx_farch_filter_table_clear_entry()
2546 __clear_bit(filter_idx, table->used_bitmap); in efx_farch_filter_table_clear_entry()
2547 --table->used; in efx_farch_filter_table_clear_entry()
2548 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); in efx_farch_filter_table_clear_entry()
2550 efx_writeo(efx, &filter, table->offset + table->step * filter_idx); in efx_farch_filter_table_clear_entry()
2555 * unless the table has become completely empty - in in efx_farch_filter_table_clear_entry()
2558 if (unlikely(table->used == 0)) { in efx_farch_filter_table_clear_entry()
2559 memset(table->search_limit, 0, sizeof(table->search_limit)); in efx_farch_filter_table_clear_entry()
2560 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) in efx_farch_filter_table_clear_entry()
2572 struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; in efx_farch_filter_remove()
2574 if (!test_bit(filter_idx, table->used_bitmap) || in efx_farch_filter_remove()
2575 spec->priority != priority) in efx_farch_filter_remove()
2576 return -ENOENT; in efx_farch_filter_remove()
2578 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { in efx_farch_filter_remove()
2592 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_remove_safe()
2600 return -ENOENT; in efx_farch_filter_remove_safe()
2601 table = &state->table[table_id]; in efx_farch_filter_remove_safe()
2604 if (filter_idx >= table->size) in efx_farch_filter_remove_safe()
2605 return -ENOENT; in efx_farch_filter_remove_safe()
2606 down_write(&state->lock); in efx_farch_filter_remove_safe()
2609 up_write(&state->lock); in efx_farch_filter_remove_safe()
2618 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_safe()
2623 int rc = -ENOENT; in efx_farch_filter_get_safe()
2625 down_read(&state->lock); in efx_farch_filter_get_safe()
2630 table = &state->table[table_id]; in efx_farch_filter_get_safe()
2633 if (filter_idx >= table->size) in efx_farch_filter_get_safe()
2635 spec = &table->spec[filter_idx]; in efx_farch_filter_get_safe()
2637 if (test_bit(filter_idx, table->used_bitmap) && in efx_farch_filter_get_safe()
2638 spec->priority == priority) { in efx_farch_filter_get_safe()
2644 up_read(&state->lock); in efx_farch_filter_get_safe()
2653 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_clear()
2654 struct efx_farch_filter_table *table = &state->table[table_id]; in efx_farch_filter_table_clear()
2657 down_write(&state->lock); in efx_farch_filter_table_clear()
2658 for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { in efx_farch_filter_table_clear()
2659 if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) in efx_farch_filter_table_clear()
2663 up_write(&state->lock); in efx_farch_filter_table_clear()
2681 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_count_rx_used()
2687 down_read(&state->lock); in efx_farch_filter_count_rx_used()
2692 table = &state->table[table_id]; in efx_farch_filter_count_rx_used()
2693 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { in efx_farch_filter_count_rx_used()
2694 if (test_bit(filter_idx, table->used_bitmap) && in efx_farch_filter_count_rx_used()
2695 table->spec[filter_idx].priority == priority) in efx_farch_filter_count_rx_used()
2700 up_read(&state->lock); in efx_farch_filter_count_rx_used()
2709 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_rx_ids()
2715 down_read(&state->lock); in efx_farch_filter_get_rx_ids()
2720 table = &state->table[table_id]; in efx_farch_filter_get_rx_ids()
2721 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { in efx_farch_filter_get_rx_ids()
2722 if (test_bit(filter_idx, table->used_bitmap) && in efx_farch_filter_get_rx_ids()
2723 table->spec[filter_idx].priority == priority) { in efx_farch_filter_get_rx_ids()
2725 count = -EMSGSIZE; in efx_farch_filter_get_rx_ids()
2729 &table->spec[filter_idx], filter_idx); in efx_farch_filter_get_rx_ids()
2734 up_read(&state->lock); in efx_farch_filter_get_rx_ids()
2742 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_restore()
2748 down_write(&state->lock); in efx_farch_filter_table_restore()
2751 table = &state->table[table_id]; in efx_farch_filter_table_restore()
2754 if (table->step == 0) in efx_farch_filter_table_restore()
2757 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { in efx_farch_filter_table_restore()
2758 if (!test_bit(filter_idx, table->used_bitmap)) in efx_farch_filter_table_restore()
2760 efx_farch_filter_build(&filter, &table->spec[filter_idx]); in efx_farch_filter_table_restore()
2762 table->offset + table->step * filter_idx); in efx_farch_filter_table_restore()
2769 up_write(&state->lock); in efx_farch_filter_table_restore()
2774 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_remove()
2778 kfree(state->table[table_id].used_bitmap); in efx_farch_filter_table_remove()
2779 vfree(state->table[table_id].spec); in efx_farch_filter_table_remove()
2792 return -ENOMEM; in efx_farch_filter_table_probe()
2793 efx->filter_state = state; in efx_farch_filter_table_probe()
2794 init_rwsem(&state->lock); in efx_farch_filter_table_probe()
2796 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; in efx_farch_filter_table_probe()
2797 table->id = EFX_FARCH_FILTER_TABLE_RX_IP; in efx_farch_filter_table_probe()
2798 table->offset = FR_BZ_RX_FILTER_TBL0; in efx_farch_filter_table_probe()
2799 table->size = FR_BZ_RX_FILTER_TBL0_ROWS; in efx_farch_filter_table_probe()
2800 table->step = FR_BZ_RX_FILTER_TBL0_STEP; in efx_farch_filter_table_probe()
2802 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; in efx_farch_filter_table_probe()
2803 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; in efx_farch_filter_table_probe()
2804 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; in efx_farch_filter_table_probe()
2805 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; in efx_farch_filter_table_probe()
2806 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; in efx_farch_filter_table_probe()
2808 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; in efx_farch_filter_table_probe()
2809 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; in efx_farch_filter_table_probe()
2810 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; in efx_farch_filter_table_probe()
2812 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; in efx_farch_filter_table_probe()
2813 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; in efx_farch_filter_table_probe()
2814 table->offset = FR_CZ_TX_MAC_FILTER_TBL0; in efx_farch_filter_table_probe()
2815 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; in efx_farch_filter_table_probe()
2816 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; in efx_farch_filter_table_probe()
2819 table = &state->table[table_id]; in efx_farch_filter_table_probe()
2820 if (table->size == 0) in efx_farch_filter_table_probe()
2822 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), in efx_farch_filter_table_probe()
2825 if (!table->used_bitmap) in efx_farch_filter_table_probe()
2827 table->spec = vzalloc(array_size(sizeof(*table->spec), in efx_farch_filter_table_probe()
2828 table->size)); in efx_farch_filter_table_probe()
2829 if (!table->spec) in efx_farch_filter_table_probe()
2833 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; in efx_farch_filter_table_probe()
2834 if (table->size) { in efx_farch_filter_table_probe()
2840 spec = &table->spec[i]; in efx_farch_filter_table_probe()
2841 spec->type = EFX_FARCH_FILTER_UC_DEF + i; in efx_farch_filter_table_probe()
2843 __set_bit(i, table->used_bitmap); in efx_farch_filter_table_probe()
2853 return -ENOMEM; in efx_farch_filter_table_probe()
2859 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_update_rx_scatter()
2865 down_write(&state->lock); in efx_farch_filter_update_rx_scatter()
2870 table = &state->table[table_id]; in efx_farch_filter_update_rx_scatter()
2872 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { in efx_farch_filter_update_rx_scatter()
2873 if (!test_bit(filter_idx, table->used_bitmap) || in efx_farch_filter_update_rx_scatter()
2874 table->spec[filter_idx].dmaq_id >= in efx_farch_filter_update_rx_scatter()
2875 efx->n_rx_channels) in efx_farch_filter_update_rx_scatter()
2878 if (efx->rx_scatter) in efx_farch_filter_update_rx_scatter()
2879 table->spec[filter_idx].flags |= in efx_farch_filter_update_rx_scatter()
2882 table->spec[filter_idx].flags &= in efx_farch_filter_update_rx_scatter()
2889 efx_farch_filter_build(&filter, &table->spec[filter_idx]); in efx_farch_filter_update_rx_scatter()
2891 table->offset + table->step * filter_idx); in efx_farch_filter_update_rx_scatter()
2897 up_write(&state->lock); in efx_farch_filter_update_rx_scatter()
2905 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_rfs_expire_one()
2910 down_write(&state->lock); in efx_farch_filter_rfs_expire_one()
2911 spin_lock_bh(&efx->rps_hash_lock); in efx_farch_filter_rfs_expire_one()
2912 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; in efx_farch_filter_rfs_expire_one()
2913 if (test_bit(index, table->used_bitmap) && in efx_farch_filter_rfs_expire_one()
2914 table->spec[index].priority == EFX_FILTER_PRI_HINT) { in efx_farch_filter_rfs_expire_one()
2918 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); in efx_farch_filter_rfs_expire_one()
2919 if (!efx->rps_hash_table) { in efx_farch_filter_rfs_expire_one()
2930 arfs_id = rule->arfs_id; in efx_farch_filter_rfs_expire_one()
2935 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, in efx_farch_filter_rfs_expire_one()
2938 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; in efx_farch_filter_rfs_expire_one()
2945 spin_unlock_bh(&efx->rps_hash_lock); in efx_farch_filter_rfs_expire_one()
2946 up_write(&state->lock); in efx_farch_filter_rfs_expire_one()
2954 struct net_device *net_dev = efx->net_dev; in efx_farch_filter_sync_rx_mode()
2956 union efx_multicast_hash *mc_hash = &efx->multicast_hash; in efx_farch_filter_sync_rx_mode()
2965 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); in efx_farch_filter_sync_rx_mode()
2968 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { in efx_farch_filter_sync_rx_mode()
2973 crc = ether_crc_le(ETH_ALEN, ha->addr); in efx_farch_filter_sync_rx_mode()
2974 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); in efx_farch_filter_sync_rx_mode()
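Each multicast address is reduced to one bit position in the hardware hash table: the little-endian Ethernet CRC of the address, masked to the table size. A standalone sketch of the mapping; the 256-entry EFX_MCAST_HASH_ENTRIES and the bit-reflected CRC-32 assumed for ether_crc_le() are assumptions:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6
#define EFX_MCAST_HASH_ENTRIES 256	/* assumed table size */

/* Bitwise little-endian (reflected) CRC-32, as ether_crc_le() computes. */
static uint32_t crc32_le_bitwise(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le_bitwise(ETH_ALEN, addr);
	unsigned int bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);

	/* The driver would set this bit in efx->multicast_hash; frames to
	 * addresses whose bit is clear are discarded by the MAC. */
	printf("hash bit %u\n", bit);
	return 0;
}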