/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#ifndef __BNA_TYPES_H__
#define __BNA_TYPES_H__

#include "cna.h"
#include "bna_hw_defs.h"
#include "bfa_cee.h"
#include "bfa_msgq.h"

/* Forward declarations */

struct bna_mcam_handle;
struct bna_txq;
struct bna_tx;
struct bna_rxq;
struct bna_cq;
struct bna_rx;
struct bna_rxf;
struct bna_enet;
struct bna;
struct bnad;

/* Enums, primitive data types */

enum bna_status {
	BNA_STATUS_T_DISABLED = 0,
	BNA_STATUS_T_ENABLED = 1
};

enum bna_cleanup_type {
	BNA_HARD_CLEANUP = 0,
	BNA_SOFT_CLEANUP = 1
};

enum bna_cb_status {
	BNA_CB_SUCCESS = 0,
	BNA_CB_FAIL = 1,
	BNA_CB_INTERRUPT = 2,
	BNA_CB_BUSY = 3,
	BNA_CB_INVALID_MAC = 4,
	BNA_CB_MCAST_LIST_FULL = 5,
	BNA_CB_UCAST_CAM_FULL = 6,
	BNA_CB_WAITING = 7,
	BNA_CB_NOT_EXEC = 8
};

enum bna_res_type {
	BNA_RES_T_MEM = 1,
	BNA_RES_T_INTR = 2
};

enum bna_mem_type {
	BNA_MEM_T_KVA = 1,
	BNA_MEM_T_DMA = 2
};

enum bna_intr_type {
	BNA_INTR_T_INTX = 1,
	BNA_INTR_T_MSIX = 2
};

enum bna_res_req_type {
	BNA_RES_MEM_T_COM = 0,
	BNA_RES_MEM_T_ATTR = 1,
	BNA_RES_MEM_T_FWTRC = 2,
	BNA_RES_MEM_T_STATS = 3,
	BNA_RES_T_MAX
};

enum bna_mod_res_req_type {
	BNA_MOD_RES_MEM_T_TX_ARRAY = 0,
	BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1,
	BNA_MOD_RES_MEM_T_RX_ARRAY = 2,
	BNA_MOD_RES_MEM_T_RXP_ARRAY = 3,
	BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4,
	BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5,
	BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6,
	BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
	BNA_MOD_RES_T_MAX
};

enum bna_tx_res_req_type {
	BNA_TX_RES_MEM_T_TCB = 0,
	BNA_TX_RES_MEM_T_UNMAPQ = 1,
	BNA_TX_RES_MEM_T_QPT = 2,
	BNA_TX_RES_MEM_T_SWQPT = 3,
	BNA_TX_RES_MEM_T_PAGE = 4,
	BNA_TX_RES_MEM_T_IBIDX = 5,
	BNA_TX_RES_INTR_T_TXCMPL = 6,
	BNA_TX_RES_T_MAX,
};

enum bna_rx_mem_type {
	BNA_RX_RES_MEM_T_CCB = 0,	/* CQ context */
	BNA_RX_RES_MEM_T_RCB = 1,	/* CQ context */
	BNA_RX_RES_MEM_T_UNMAPHQ = 2,
	BNA_RX_RES_MEM_T_UNMAPDQ = 3,
	BNA_RX_RES_MEM_T_CQPT = 4,
	BNA_RX_RES_MEM_T_CSWQPT = 5,
	BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
	BNA_RX_RES_MEM_T_HQPT = 7,
	BNA_RX_RES_MEM_T_DQPT = 8,
	BNA_RX_RES_MEM_T_HSWQPT = 9,
	BNA_RX_RES_MEM_T_DSWQPT = 10,
	BNA_RX_RES_MEM_T_DPAGE = 11,
	BNA_RX_RES_MEM_T_HPAGE = 12,
	BNA_RX_RES_MEM_T_IBIDX = 13,
	BNA_RX_RES_MEM_T_RIT = 14,
	BNA_RX_RES_T_INTR = 15,
	BNA_RX_RES_T_MAX = 16
};

enum bna_tx_type {
	BNA_TX_T_REGULAR = 0,
	BNA_TX_T_LOOPBACK = 1,
};

enum bna_tx_flags {
	BNA_TX_F_ENET_STARTED = 1,
	BNA_TX_F_ENABLED = 2,
	BNA_TX_F_BW_UPDATED = 8,
};

enum bna_tx_mod_flags {
	BNA_TX_MOD_F_ENET_STARTED = 1,
	BNA_TX_MOD_F_ENET_LOOPBACK = 2,
};

enum bna_rx_type {
	BNA_RX_T_REGULAR = 0,
	BNA_RX_T_LOOPBACK = 1,
};

enum bna_rxp_type {
	BNA_RXP_SINGLE = 1,
	BNA_RXP_SLR = 2,
	BNA_RXP_HDS = 3
};

enum bna_rxmode {
	BNA_RXMODE_PROMISC = 1,
	BNA_RXMODE_DEFAULT = 2,
	BNA_RXMODE_ALLMULTI = 4
};

enum bna_rx_event {
	RX_E_START = 1,
	RX_E_STOP = 2,
	RX_E_FAIL = 3,
	RX_E_STARTED = 4,
	RX_E_STOPPED = 5,
	RX_E_RXF_STARTED = 6,
	RX_E_RXF_STOPPED = 7,
	RX_E_CLEANUP_DONE = 8,
};

enum bna_rx_flags {
	BNA_RX_F_ENET_STARTED = 1,
	BNA_RX_F_ENABLED = 2,
};

enum bna_rx_mod_flags {
	BNA_RX_MOD_F_ENET_STARTED = 1,
	BNA_RX_MOD_F_ENET_LOOPBACK = 2,
};

enum bna_rxf_event {
	RXF_E_START = 1,
	RXF_E_STOP = 2,
	RXF_E_FAIL = 3,
	RXF_E_CONFIG = 4,
	RXF_E_FW_RESP = 7,
};

enum bna_enet_type {
	BNA_ENET_T_REGULAR = 0,
	BNA_ENET_T_LOOPBACK_INTERNAL = 1,
	BNA_ENET_T_LOOPBACK_EXTERNAL = 2,
};

enum bna_link_status {
	BNA_LINK_DOWN = 0,
	BNA_LINK_UP = 1,
	BNA_CEE_UP = 2
};

enum bna_ethport_flags {
	BNA_ETHPORT_F_ADMIN_UP = 1,
	BNA_ETHPORT_F_PORT_ENABLED = 2,
	BNA_ETHPORT_F_RX_STARTED = 4,
};

enum bna_enet_flags {
	BNA_ENET_F_IOCETH_READY = 1,
	BNA_ENET_F_ENABLED = 2,
	BNA_ENET_F_PAUSE_CHANGED = 4,
	BNA_ENET_F_MTU_CHANGED = 8
};

enum bna_rss_flags {
	BNA_RSS_F_RIT_PENDING = 1,
	BNA_RSS_F_CFG_PENDING = 2,
	BNA_RSS_F_STATUS_PENDING = 4,
};

enum bna_mod_flags {
	BNA_MOD_F_INIT_DONE = 1,
};

enum bna_pkt_rates {
	BNA_PKT_RATE_10K = 10000,
	BNA_PKT_RATE_20K = 20000,
	BNA_PKT_RATE_30K = 30000,
	BNA_PKT_RATE_40K = 40000,
	BNA_PKT_RATE_50K = 50000,
	BNA_PKT_RATE_60K = 60000,
	BNA_PKT_RATE_70K = 70000,
	BNA_PKT_RATE_80K = 80000,
};

enum bna_dim_load_types {
	BNA_LOAD_T_HIGH_4 = 0,	/* 80K <= r */
	BNA_LOAD_T_HIGH_3 = 1,	/* 60K <= r < 80K */
	BNA_LOAD_T_HIGH_2 = 2,	/* 50K <= r < 60K */
	BNA_LOAD_T_HIGH_1 = 3,	/* 40K <= r < 50K */
	BNA_LOAD_T_LOW_1 = 4,	/* 30K <= r < 40K */
	BNA_LOAD_T_LOW_2 = 5,	/* 20K <= r < 30K */
	BNA_LOAD_T_LOW_3 = 6,	/* 10K <= r < 20K */
	BNA_LOAD_T_LOW_4 = 7,	/* r < 10K */
	BNA_LOAD_T_MAX = 8
};

enum bna_dim_bias_types {
	BNA_BIAS_T_SMALL = 0,	/* small pkts > (large pkts * 2) */
	BNA_BIAS_T_LARGE = 1,	/* Not BNA_BIAS_T_SMALL */
	BNA_BIAS_T_MAX = 2
};
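
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name below is hypothetical; it simply shows how a sampled packet rate
 * could be binned into the bna_dim_load_types buckets documented above,
 * using the BNA_PKT_RATE_* thresholds.
 */
static inline enum bna_dim_load_types
bna_pkt_rate_to_load(u32 pkts_per_interval)
{
	if (pkts_per_interval >= BNA_PKT_RATE_80K)
		return BNA_LOAD_T_HIGH_4;
	if (pkts_per_interval >= BNA_PKT_RATE_60K)
		return BNA_LOAD_T_HIGH_3;
	if (pkts_per_interval >= BNA_PKT_RATE_50K)
		return BNA_LOAD_T_HIGH_2;
	if (pkts_per_interval >= BNA_PKT_RATE_40K)
		return BNA_LOAD_T_HIGH_1;
	if (pkts_per_interval >= BNA_PKT_RATE_30K)
		return BNA_LOAD_T_LOW_1;
	if (pkts_per_interval >= BNA_PKT_RATE_20K)
		return BNA_LOAD_T_LOW_2;
	if (pkts_per_interval >= BNA_PKT_RATE_10K)
		return BNA_LOAD_T_LOW_3;
	return BNA_LOAD_T_LOW_4;
}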

#define BNA_MAX_NAME_SIZE 64
struct bna_ident {
	int id;
	char name[BNA_MAX_NAME_SIZE];
};

struct bna_mac {
	/* This should be the first one */
	struct list_head qe;
	u8 addr[ETH_ALEN];
	struct bna_mcam_handle *handle;
};

struct bna_mem_descr {
	u32 len;
	void *kva;
	struct bna_dma_addr dma;
};

struct bna_mem_info {
	enum bna_mem_type mem_type;
	u32 len;
	u32 num;
	u32 align_sz; /* 0/1 = no alignment */
	struct bna_mem_descr *mdl;
	void *cookie; /* For bnad to unmap dma later */
};

struct bna_intr_descr {
	int vector;
};

struct bna_intr_info {
	enum bna_intr_type intr_type;
	int num;
	struct bna_intr_descr *idl;
};

union bna_res_u {
	struct bna_mem_info mem_info;
	struct bna_intr_info intr_info;
};

struct bna_res_info {
	enum bna_res_type res_type;
	union bna_res_u res_u;
};
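
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name is hypothetical; it shows how one memory resource request is sized:
 * a bna_mem_info asks for 'num' chunks of 'len' bytes each (kernel-virtual
 * or DMA, per mem_type), and the individual chunks are described by the
 * bna_mem_descr entries in 'mdl'.
 */
static inline u32
bna_mem_info_total_len(const struct bna_mem_info *mem_info)
{
	return mem_info->len * mem_info->num;
}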

/* HW QPT */
struct bna_qpt {
	struct bna_dma_addr hw_qpt_ptr;
	void *kv_qpt_ptr;
	u32 page_count;
	u32 page_size;
};

struct bna_attr {
	bool fw_query_complete;
	int num_txq;
	int num_rxp;
	int num_ucmac;
	int num_mcmac;
	int max_rit_size;
};

/* IOCEth */

struct bna_ioceth {
	bfa_fsm_t fsm;
	struct bfa_ioc ioc;

	struct bna_attr attr;
	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_attr_req attr_req;

	void (*stop_cbfn)(struct bnad *bnad);
	struct bnad *stop_cbarg;

	struct bna *bna;
};

/* Enet */

/* Pause configuration */
struct bna_pause_config {
	enum bna_status tx_pause;
	enum bna_status rx_pause;
};

struct bna_enet {
	bfa_fsm_t fsm;
	enum bna_enet_flags flags;

	enum bna_enet_type type;

	struct bna_pause_config pause_config;
	int mtu;

	/* Callback for bna_enet_disable(), enet_stop() */
	void (*stop_cbfn)(void *);
	void *stop_cbarg;

	/* Callback for bna_enet_mtu_set() */
	void (*mtu_cbfn)(struct bnad *);

	struct bfa_wc chld_stop_wc;

	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_set_pause_req pause_req;

	struct bna *bna;
};

/* Ethport */

struct bna_ethport {
	bfa_fsm_t fsm;
	enum bna_ethport_flags flags;

	enum bna_link_status link_status;

	int rx_started_count;

	void (*stop_cbfn)(struct bna_enet *);

	void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);

	void (*link_cbfn)(struct bnad *, enum bna_link_status);

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_enable_req admin_req;
		struct bfi_enet_diag_lb_req lpbk_req;
	} bfi_enet_cmd;

	struct bna *bna;
};

/* Interrupt Block */

/* Doorbell structure */
struct bna_ib_dbell {
	void __iomem *doorbell_addr;
	u32 doorbell_ack;
};

/* IB structure */
struct bna_ib {
	struct bna_dma_addr ib_seg_host_addr;
	void *ib_seg_host_addr_kva;

	struct bna_ib_dbell door_bell;

	enum bna_intr_type intr_type;
	int intr_vector;

	u8 coalescing_timeo; /* Unit is 5usec. */

	int interpkt_count;
	int interpkt_timeo;
};

/* Tx object */

/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE 16
struct bna_tcb {
	/* Fast path */
	void **sw_qpt;
	void *sw_q;
	void *unmap_q;
	u32 producer_index;
	u32 consumer_index;
	volatile u32 *hw_consumer_index;
	u32 q_depth;
	void __iomem *q_dbell;
	struct bna_ib_dbell *i_dbell;
	/* Control path */
	struct bna_txq *txq;
	struct bnad *bnad;
	void *priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int intr_vector;
	u8 priority; /* Current priority */
	unsigned long flags; /* Used by bnad as required */
	int id;
	char name[BNA_Q_NAME_SIZE];
};
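
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name is hypothetical and it assumes q_depth is a power of two (the
 * masking below relies on that); it shows how free TxQ entries can be
 * derived from the producer/consumer indices, keeping one entry unused to
 * distinguish a full ring from an empty one.
 */
static inline u32
bna_tcb_free_entries(const struct bna_tcb *tcb)
{
	return (tcb->consumer_index - tcb->producer_index - 1) &
		(tcb->q_depth - 1);
}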

/* TxQ QPT and configuration */
struct bna_txq {
	/* This should be the first one */
	struct list_head qe;

	u8 priority;

	struct bna_qpt qpt;
	struct bna_tcb *tcb;
	struct bna_ib ib;

	struct bna_tx *tx;

	int hw_id;

	u64 tx_packets;
	u64 tx_bytes;
};

/* Tx object */
struct bna_tx {
	/* This should be the first one */
	struct list_head qe;
	int rid;
	int hw_id;

	bfa_fsm_t fsm;
	enum bna_tx_flags flags;

	enum bna_tx_type type;
	int num_txq;

	struct list_head txq_q;
	u16 txf_vlan_id;

	/* Tx event handlers */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);

	/* callback for bna_tx_disable(), bna_tx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_tx *tx);
	void *stop_cbarg;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_tx_cfg_req cfg_req;
		struct bfi_enet_req req;
		struct bfi_enet_tx_cfg_rsp cfg_rsp;
	} bfi_enet_cmd;

	struct bna *bna;
	void *priv; /* bnad's cookie */
};

/* Tx object configuration used during creation */
struct bna_tx_config {
	int num_txq;
	int txq_depth;
	int coalescing_timeo;
	enum bna_tx_type tx_type;
};

struct bna_tx_event_cbfn {
	/* Optional */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	/* Mandatory */
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
};

/* Tx module - keeps track of free, active tx objects */
struct bna_tx_mod {
	struct bna_tx *tx;	/* BFI_MAX_TXQ entries */
	struct bna_txq *txq;	/* BFI_MAX_TXQ entries */

	struct list_head tx_free_q;
	struct list_head tx_active_q;

	struct list_head txq_free_q;

	/* callback for bna_tx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);

	struct bfa_wc tx_stop_wc;

	enum bna_tx_mod_flags flags;

	u8 prio_map;
	int default_prio;
	int iscsi_over_cee;
	int iscsi_prio;
	int prio_reconfigured;

	u32 rid_mask;

	struct bna *bna;
};

/* Rx object */

/* Rx datapath control structure */
struct bna_rcb {
	/* Fast path */
	void **sw_qpt;
	void *sw_q;
	void *unmap_q;
	u32 producer_index;
	u32 consumer_index;
	u32 q_depth;
	void __iomem *q_dbell;
	/* Control path */
	struct bna_rxq *rxq;
	struct bna_ccb *ccb;
	struct bnad *bnad;
	void *priv; /* BNAD's cookie */
	unsigned long flags;
	int id;
};

/* RxQ structure - QPT, configuration */
struct bna_rxq {
	struct list_head qe;

	int buffer_size;
	int q_depth;
	u32 num_vecs;
	enum bna_status multi_buffer;

	struct bna_qpt qpt;
	struct bna_rcb *rcb;

	struct bna_rxp *rxp;
	struct bna_rx *rx;

	int hw_id;

	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_packets_with_error;
	u64 rxbuf_alloc_failed;
	u64 rxbuf_map_failed;
};

/* RxQ pair */
union bna_rxq_u {
	struct {
		struct bna_rxq *hdr;
		struct bna_rxq *data;
	} hds;
	struct {
		struct bna_rxq *small;
		struct bna_rxq *large;
	} slr;
	struct {
		struct bna_rxq *only;
		struct bna_rxq *reserved;
	} single;
};

/* Packet rate for Dynamic Interrupt Moderation */
struct bna_pkt_rate {
	u32 small_pkt_cnt;
	u32 large_pkt_cnt;
};
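
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name is hypothetical; it applies the rule documented with
 * bna_dim_bias_types above: the sampling window is "small-packet biased"
 * when small packets outnumber large packets by more than 2:1.
 */
static inline enum bna_dim_bias_types
bna_pkt_rate_bias(const struct bna_pkt_rate *rate)
{
	return (rate->small_pkt_cnt > (rate->large_pkt_cnt * 2)) ?
		BNA_BIAS_T_SMALL : BNA_BIAS_T_LARGE;
}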

/* Completion control structure */
struct bna_ccb {
	/* Fast path */
	void **sw_qpt;
	void *sw_q;
	u32 producer_index;
	volatile u32 *hw_producer_index;
	u32 q_depth;
	struct bna_ib_dbell *i_dbell;
	struct bna_rcb *rcb[2];
	void *ctrl; /* For bnad */
	struct bna_pkt_rate pkt_rate;
	u32 pkts_una;
	u32 bytes_per_intr;

	/* Control path */
	struct bna_cq *cq;
	struct bnad *bnad;
	void *priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int intr_vector;
	u8 rx_coalescing_timeo; /* For NAPI */
	int id;
	char name[BNA_Q_NAME_SIZE];
};

/* CQ QPT, configuration */
struct bna_cq {
	struct bna_qpt qpt;
	struct bna_ccb *ccb;

	struct bna_ib ib;

	struct bna_rx *rx;
};

struct bna_rss_config {
	enum bfi_enet_rss_type hash_type;
	u8 hash_mask;
	u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
};

struct bna_hds_config {
	enum bfi_enet_hds_type hdr_type;
	int forced_offset;
};

/* Rx object configuration used during creation */
struct bna_rx_config {
	enum bna_rx_type rx_type;
	int num_paths;
	enum bna_rxp_type rxp_type;
	int coalescing_timeo;
	/*
	 * Small/Large (or Header/Data) buffer size to be configured
	 * for SLR and HDS queue type.
	 */
	u32 frame_size;

	/* header or small queue */
	u32 q1_depth;
	u32 q1_buf_size;

	/* data or large queue */
	u32 q0_depth;
	u32 q0_buf_size;
	u32 q0_num_vecs;
	enum bna_status q0_multi_buf;

	enum bna_status rss_status;
	struct bna_rss_config rss_config;

	struct bna_hds_config hds_config;

	enum bna_status vlan_strip_status;
};

/* Rx Path structure - one per MSIX vector/CPU */
struct bna_rxp {
	/* This should be the first one */
	struct list_head qe;

	enum bna_rxp_type type;
	union bna_rxq_u rxq;
	struct bna_cq cq;

	struct bna_rx *rx;

	/* MSI-x vector number for configuring RSS */
	int vector;
	int hw_id;
};

/* RxF structure (hardware Rx Function) */
struct bna_rxf {
	bfa_fsm_t fsm;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_enable_req req;
		struct bfi_enet_rss_cfg_req rss_req;
		struct bfi_enet_rit_req rit_req;
		struct bfi_enet_rx_vlan_req vlan_req;
		struct bfi_enet_mcast_add_req mcast_add_req;
		struct bfi_enet_mcast_del_req mcast_del_req;
		struct bfi_enet_ucast_req ucast_req;
	} bfi_enet_cmd;

	/* callback for bna_rxf_start() */
	void (*start_cbfn) (struct bna_rx *rx);
	struct bna_rx *start_cbarg;

	/* callback for bna_rxf_stop() */
	void (*stop_cbfn) (struct bna_rx *rx);
	struct bna_rx *stop_cbarg;

	/**
	 * callback for:
	 * bna_rxf_ucast_set()
	 * bna_rxf_{ucast/mcast}_add(),
	 * bna_rxf_{ucast/mcast}_del(),
	 * bna_rxf_mode_set()
	 */
	void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
	struct bnad *cam_fltr_cbarg;

	/* List of unicast addresses yet to be applied to h/w */
	struct list_head ucast_pending_add_q;
	struct list_head ucast_pending_del_q;
	struct bna_mac *ucast_pending_mac;
	int ucast_pending_set;
	/* ucast addresses applied to the h/w */
	struct list_head ucast_active_q;
	struct bna_mac ucast_active_mac;
	int ucast_active_set;

	/* List of multicast addresses yet to be applied to h/w */
	struct list_head mcast_pending_add_q;
	struct list_head mcast_pending_del_q;
	/* multicast addresses applied to the h/w */
	struct list_head mcast_active_q;
	struct list_head mcast_handle_q;

	/* Rx modes yet to be applied to h/w */
	enum bna_rxmode rxmode_pending;
	enum bna_rxmode rxmode_pending_bitmask;
	/* Rx modes applied to h/w */
	enum bna_rxmode rxmode_active;

	u8 vlan_pending_bitmask;
	enum bna_status vlan_filter_status;
	u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32];
	bool vlan_strip_pending;
	enum bna_status vlan_strip_status;

	enum bna_rss_flags rss_pending;
	enum bna_status rss_status;
	struct bna_rss_config rss_cfg;
	u8 *rit;
	int rit_size;

	struct bna_rx *rx;
};
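
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name is hypothetical and it assumes hash_mask reduces an RSS hash to a
 * valid index into the RIT (RSS indirection table) held in bna_rxf; the
 * table entry selects which receive path handles the flow. The exact
 * firmware interpretation is defined by the bfi_enet definitions, not here.
 */
static inline u8
bna_rxf_rit_lookup(const struct bna_rxf *rxf, u32 rss_hash)
{
	return rxf->rit[rss_hash & rxf->rss_cfg.hash_mask];
}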

/* Rx object */
struct bna_rx {
	/* This should be the first one */
	struct list_head qe;
	int rid;
	int hw_id;

	bfa_fsm_t fsm;

	enum bna_rx_type type;

	int num_paths;
	struct list_head rxp_q;

	struct bna_hds_config hds_cfg;

	struct bna_rxf rxf;

	enum bna_rx_flags rx_flags;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_rx_cfg_req cfg_req;
		struct bfi_enet_req req;
		struct bfi_enet_rx_cfg_rsp cfg_rsp;
	} bfi_enet_cmd;

	/* Rx event handlers */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);

	/* callback for bna_rx_disable(), bna_rx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_rx *rx);
	void *stop_cbarg;

	struct bna *bna;
	void *priv; /* bnad's cookie */
};

struct bna_rx_event_cbfn {
	/* Optional */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	/* Mandatory */
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
};

/* Rx module - keeps track of free, active rx objects */
struct bna_rx_mod {
	struct bna *bna;		/* back pointer to parent */
	struct bna_rx *rx;		/* BFI_MAX_RXQ entries */
	struct bna_rxp *rxp;		/* BFI_MAX_RXQ entries */
	struct bna_rxq *rxq;		/* BFI_MAX_RXQ entries */

	struct list_head rx_free_q;
	struct list_head rx_active_q;
	int rx_free_count;

	struct list_head rxp_free_q;
	int rxp_free_count;

	struct list_head rxq_free_q;
	int rxq_free_count;

	enum bna_rx_mod_flags flags;

	/* callback for bna_rx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);

	struct bfa_wc rx_stop_wc;
	u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
	u32 rid_mask;
};
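
/*
 * Illustrative sketch only -- not part of the original driver. The helper
 * name is hypothetical and it assumes dim_vector[][] stores a per
 * (load, bias) coalescing value used by dynamic interrupt moderation;
 * classification into load and bias buckets follows the bna_dim_load_types
 * and bna_dim_bias_types enums defined earlier in this header.
 */
static inline u32
bna_rx_mod_dim_value(const struct bna_rx_mod *rx_mod,
		     enum bna_dim_load_types load,
		     enum bna_dim_bias_types bias)
{
	return rx_mod->dim_vector[load][bias];
}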

/* CAM */

struct bna_ucam_mod {
	struct bna_mac *ucmac;		/* num_ucmac * 2 entries */
	struct list_head free_q;
	struct list_head del_q;

	struct bna *bna;
};

struct bna_mcam_handle {
	/* This should be the first one */
	struct list_head qe;
	int handle;
	int refcnt;
};

struct bna_mcam_mod {
	struct bna_mac *mcmac;		/* num_mcmac * 2 entries */
	struct bna_mcam_handle *mchandle;	/* num_mcmac entries */
	struct list_head free_q;
	struct list_head del_q;
	struct list_head free_handle_q;

	struct bna *bna;
};

/* Statistics */

struct bna_stats {
	struct bna_dma_addr hw_stats_dma;
	struct bfi_enet_stats *hw_stats_kva;
	struct bfi_enet_stats hw_stats;
};

struct bna_stats_mod {
	bool ioc_ready;
	bool stats_get_busy;
	bool stats_clr_busy;
	struct bfa_msgq_cmd_entry stats_get_cmd;
	struct bfa_msgq_cmd_entry stats_clr_cmd;
	struct bfi_enet_stats_req stats_get;
	struct bfi_enet_stats_req stats_clr;
};

/* BNA */

struct bna {
	struct bna_ident ident;
	struct bfa_pcidev pcidev;

	struct bna_reg regs;
	struct bna_bit_defn bits;

	struct bna_stats stats;

	struct bna_ioceth ioceth;
	struct bfa_cee cee;
	struct bfa_flash flash;
	struct bfa_msgq msgq;

	struct bna_ethport ethport;
	struct bna_enet enet;
	struct bna_stats_mod stats_mod;

	struct bna_tx_mod tx_mod;
	struct bna_rx_mod rx_mod;
	struct bna_ucam_mod ucam_mod;
	struct bna_mcam_mod mcam_mod;

	enum bna_mod_flags mod_flags;

	int default_mode_rid;
	int promisc_rid;

	struct bnad *bnad;
};
#endif	/* __BNA_TYPES_H__ */