/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/

#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_STATS_TIMEOUT	1
#define IBMVNIC_INIT_FAILED	2
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT		16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ	4096
#define IBMVNIC_MAX_IND_DESCS	16
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
#define IBMVNIC_TSO_POOL_MASK	0x80000000

#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN 500

#define IBMVNIC_RESET_DELAY 100

struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);

struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);

struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};

#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};

struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);

/* descriptors have been changed, how should this be defined? 1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);

/* some flags that were included in the v0 descriptor, which is gone;
 * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM, and only in
 * an offload_flags variable that doesn't seem to be used anywhere, so
 * they can probably be removed?
 */

#define IBMVNIC_TCP_CHKSUM		0x20
#define IBMVNIC_UDP_CHKSUM		0x08

struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;
	u8 n_sge;
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;
	u8 reserved[4];
	__be32 correlator;
	__be16 vlan_id;
	__be16 dma_reg;
	__be32 sge_len;
	__be64 ioba;
} __packed __aligned(8);
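
/* Illustrative only (not part of the upstream driver): a minimal sketch of
 * how a v1 TX descriptor built from the definitions above might be filled
 * in for a checksum-offloaded TCP/IPv4 frame.  The helper name and the
 * caller-supplied correlator/dma_reg/len/ioba values are hypothetical, and
 * the usual kernel includes (types, byteorder) are assumed to be in place,
 * as they are wherever this header is consumed.  Real descriptor
 * construction lives in ibmvnic.c.
 */
static inline void ibmvnic_example_fill_tx_desc(struct ibmvnic_tx_desc *d,
						u32 correlator, u16 dma_reg,
						u32 len, u64 ioba)
{
	memset(d, 0, sizeof(*d));
	/* d->first carries the CRQ entry type byte (IBMVNIC_CRQ_CMD, see the
	 * ibmvnic_crq_type enum further down); omitted here to avoid a
	 * forward reference.
	 */
	d->type = IBMVNIC_TX_DESC;
	d->n_crq_elem = 1;
	d->n_sge = 1;
	d->flags1 = IBMVNIC_TX_COMP_NEEDED | IBMVNIC_TX_CHKSUM_OFFLOAD |
		    IBMVNIC_TX_PROT_TCP | IBMVNIC_TX_PROT_IPV4;
	/* multi-byte fields are big endian on the wire */
	d->correlator = cpu_to_be32(correlator);
	d->dma_reg = cpu_to_be16(dma_reg);
	d->sge_len = cpu_to_be32(len);
	d->ioba = cpu_to_be64(ioba);
}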

struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC	0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC	0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);

struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC	0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);

struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME		0x20
#define IBMVNIC_EXACT_MC		0x10
#define IBMVNIC_VLAN_STRIPPED		0x08
	__be16 off_frame_data;
	__be32 len;
	__be64 correlator;
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;
	u8 reserved2[8];
} __packed __aligned(8);

struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);

struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC			0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX		0x08
#define IBMVNIC_FULL_DUPLEX		0x04
#define IBMVNIC_HALF_DUPLEX		0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE	0x80
	__be32 speed;
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
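
/* Illustrative only (not part of the upstream driver): a sketch of how the
 * big-endian speed field reported in ibmvnic_phys_parms might be translated
 * into the SPEED_* constants from <uapi/linux/ethtool.h>.  The helper name
 * is hypothetical; the in-tree driver performs an equivalent mapping when
 * handling the QUERY_PHYS_PARMS response.
 */
static inline u32 ibmvnic_example_speed_to_ethtool(__be32 speed)
{
	switch (be32_to_cpu(speed)) {
	case IBMVNIC_10MBPS:
		return SPEED_10;
	case IBMVNIC_100MBPS:
		return SPEED_100;
	case IBMVNIC_1GBPS:
		return SPEED_1000;
	case IBMVNIC_10GBPS:
		return SPEED_10000;
	case IBMVNIC_25GBPS:
		return SPEED_25000;
	case IBMVNIC_40GBPS:
		return SPEED_40000;
	case IBMVNIC_50GBPS:
		return SPEED_50000;
	case IBMVNIC_100GBPS:
		return SPEED_100000;
	case IBMVNIC_200GBPS:
		return SPEED_200000;
	default:
		/* IBMVNIC_AUTONEG and unknown encodings fall through here */
		return SPEED_UNKNOWN;
	}
}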

struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN		0x00
#define IBMVNIC_LOGICAL_LNK_UP		0x01
#define IBMVNIC_LOGICAL_LNK_QUERY	0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;
	u8 reserved2[2];
} __packed __aligned(8);

struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC	0x80
#define IBMVNIC_DISABLE_MC	0x40
#define IBMVNIC_ENABLE_ALL	0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};

enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};

enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};

enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};

enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD		= 0x80,
	IBMVNIC_CRQ_CMD_RSP	= 0x80,
	IBMVNIC_CRQ_INIT_CMD	= 0xC0,
	IBMVNIC_CRQ_INIT_RSP	= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT	= 0xFF,
};

enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT		= 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE	= 0x02,
	IBMVNIC_PARTITION_MIGRATED	= 0x06,
	IBMVNIC_DEVICE_FAILOVER		= 0x08,
};
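
/* Illustrative only (not part of the upstream driver): a minimal sketch of
 * how a command CRQ built from the definitions above might be populated,
 * using the version exchange that opens the protocol handshake as an
 * example.  The helper name is hypothetical; actual submission goes through
 * the driver's CRQ send path in ibmvnic.c (an H_SEND_CRQ hcall), which is
 * not shown here.
 */
static inline void ibmvnic_example_build_version_xchg(union ibmvnic_crq *crq)
{
	memset(crq, 0, sizeof(*crq));
	crq->version_exchange.first = IBMVNIC_CRQ_CMD;
	crq->version_exchange.cmd = VERSION_EXCHANGE;
	crq->version_exchange.version = cpu_to_be16(IBMVNIC_INITIAL_VERSION);
}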

struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
};

union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;
};

struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;
	unsigned long hw_irq;
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;
	char name[32];
	u64 handle;
} ____cacheline_aligned;

struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};

struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;
	int pool_index;
	int num_entries;
};

struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;
	int consumer_index;
	int producer_index;
	struct ibmvnic_long_term_buff long_term_buff;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;

struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;
	int index;
	int buff_size;
	atomic_t available;
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_long_term_buff long_term_buff;
} ____cacheline_aligned;

struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};

enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};

struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	atomic_t running_cap_crqs;
	bool wait_capability;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* partner capabilities */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
	u8 map_id;
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;
	u32 cur_rx_buf_sz;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};