/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_llh.h: Declarations of bitfield and register access functions
 * for Atlantic registers.
 */

#ifndef HW_ATL_LLH_H
#define HW_ATL_LLH_H

#include <linux/types.h>

struct aq_hw_s;

/* global */

/* set global microprocessor semaphore */
void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
        u32 semaphore);

/* get global microprocessor semaphore */
u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);

/* set global register reset disable */
void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);

/* set soft reset */
void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);

/* get soft reset */
u32 glb_soft_res_get(struct aq_hw_s *aq_hw);

/* stats */

u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);

/* get rx dma good octet counter lsw */
u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);

/* get rx dma good packet counter lsw */
u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);

/* get tx dma good octet counter lsw */
u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);

/* get tx dma good packet counter lsw */
u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);

/* get rx dma good octet counter msw */
u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);

/* get rx dma good packet counter msw */
u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);

/* get tx dma good octet counter msw */
u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);

/* get tx dma good packet counter msw */
u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
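/* Illustrative sketch (not part of the original API): the good octet/packet
 * counters above are split into lsw/msw halves, so a 64-bit value has to be
 * assembled from two 32-bit reads.  One plausible way to do that, re-reading
 * the low word if the high word changed between the reads, is shown below;
 * the helper name is hypothetical.
 */
static inline u64 hw_atl_llh_example_rx_good_octets(struct aq_hw_s *aq_hw)
{
        u32 msw = stats_rx_dma_good_octet_countermsw_get(aq_hw);
        u32 lsw = stats_rx_dma_good_octet_counterlsw_get(aq_hw);
        u32 msw2 = stats_rx_dma_good_octet_countermsw_get(aq_hw);

        if (msw != msw2) {
                /* the high word rolled over while we were reading: take a
                 * fresh low word that is consistent with the newer high word
                 */
                lsw = stats_rx_dma_good_octet_counterlsw_get(aq_hw);
                msw = msw2;
        }

        return ((u64)msw << 32) | lsw;
}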
/* get msm rx errors counter register */
u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx unicast frames counter register */
u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx multicast frames counter register */
u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx broadcast frames counter register */
u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx broadcast octets counter register 1 */
u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm rx unicast octets counter register 0 */
u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);

/* get rx dma statistics counter 7 */
u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);

/* get msm tx errors counter register */
u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx unicast frames counter register */
u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx multicast frames counter register */
u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx broadcast frames counter register */
u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx multicast octets counter register 1 */
u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm tx broadcast octets counter register 1 */
u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm tx unicast octets counter register 0 */
u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);

/* get global mif identification */
u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);

/* interrupt */

/* set interrupt auto mask lsw */
void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);

/* set interrupt mapping enable rx */
void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);

/* set interrupt mapping enable tx */
void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);

/* set interrupt mapping rx */
void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);

/* set interrupt mapping tx */
void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);

/* set interrupt mask clear lsw */
void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);

/* set interrupt mask set lsw */
void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);

/* set interrupt register reset disable */
void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);

/* set interrupt status clear lsw */
void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
        u32 irq_status_clearlsw);

/* get interrupt status lsw */
u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);

/* get reset interrupt */
u32 itr_res_irq_get(struct aq_hw_s *aq_hw);

/* set reset interrupt */
void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
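/* Illustrative sketch (not part of the original API): assuming the mask
 * set/clear and status registers take a bitmask with one bit per interrupt
 * vector, and that writing the mask-set register enables delivery for the
 * vectors whose bits are set, a caller could arm a group of vectors and
 * later acknowledge pending ones roughly as below.  The helper names and
 * the bit-per-vector assumption are hypothetical.
 */
static inline void hw_atl_llh_example_irq_enable(struct aq_hw_s *aq_hw,
        u32 vector_mask)
{
        /* enable delivery for the requested vectors */
        itr_irq_msk_setlsw_set(aq_hw, vector_mask);
}

static inline void hw_atl_llh_example_irq_ack(struct aq_hw_s *aq_hw)
{
        u32 pending = itr_irq_statuslsw_get(aq_hw);

        /* clear whatever is currently reported as pending */
        itr_irq_status_clearlsw_set(aq_hw, pending);
}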
/* rdm */

/* set cpu id */
void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);

/* set rx dca enable */
void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);

/* set rx dca mode */
void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);

/* set rx descriptor data buffer size */
void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
        u32 rx_desc_data_buff_size,
        u32 descriptor);

/* set rx descriptor dca enable */
void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
        u32 dca);

/* set rx descriptor enable */
void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
        u32 descriptor);

/* set rx descriptor header splitting */
void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
        u32 rx_desc_head_splitting,
        u32 descriptor);

/* get rx descriptor head pointer */
u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set rx descriptor length */
void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
        u32 descriptor);

/* set rx descriptor write-back interrupt enable */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
        u32 rx_desc_wr_wb_irq_en);

/* set rx header dca enable */
void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
        u32 dca);

/* set rx payload dca enable */
void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);

/* set rx descriptor header buffer size */
void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
        u32 rx_desc_head_buff_size,
        u32 descriptor);

/* set rx descriptor reset */
void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
        u32 descriptor);

/* set rdm interrupt moderation enable */
void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);

/* reg */

/* set general interrupt mapping register */
void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);

/* get general interrupt status register */
u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);

/* set interrupt global control register */
void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);

/* set interrupt throttle register */
void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);

/* set rx dma descriptor base address lsw */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
        u32 rx_dma_desc_base_addrlsw,
        u32 descriptor);

/* set rx dma descriptor base address msw */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
        u32 rx_dma_desc_base_addrmsw,
        u32 descriptor);

/* get rx dma descriptor status register */
u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set rx dma descriptor tail pointer register */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
        u32 rx_dma_desc_tail_ptr,
        u32 descriptor);

/* set rx filter multicast filter mask register */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
        u32 rx_flr_mcst_flr_msk);

/* set rx filter multicast filter register */
void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
        u32 filter);

/* set rx filter rss control register 1 */
void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
        u32 rx_flr_rss_control1);

/* set rx filter control register 2 */
void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);

/* set rx interrupt moderation control register */
void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
        u32 rx_intr_moderation_ctl,
        u32 queue);

/* set tx dma debug control */
void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);

/* set tx dma descriptor base address lsw */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
        u32 tx_dma_desc_base_addrlsw,
        u32 descriptor);

/* set tx dma descriptor base address msw */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
        u32 tx_dma_desc_base_addrmsw,
        u32 descriptor);

/* set tx dma descriptor tail pointer register */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
        u32 tx_dma_desc_tail_ptr,
        u32 descriptor);

/* set tx interrupt moderation control register */
void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
        u32 tx_intr_moderation_ctl,
        u32 queue);

/* set global microprocessor scratch pad */
void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
        u32 glb_cpu_scratch_scp, u32 scratch_scp);
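/* Illustrative sketch (not part of the original API): programming an RX
 * descriptor ring generally means splitting the DMA base address into the
 * lsw/msw registers, writing the ring length, and enabling the ring.  The
 * helper below is a hypothetical example of that sequence; the encoding
 * expected by rdm_rx_desc_len_set (raw descriptor count vs. a scaled value)
 * is hardware-specific and is passed through unmodified here.
 */
static inline void hw_atl_llh_example_rx_ring_init(struct aq_hw_s *aq_hw,
        dma_addr_t base, u32 len_field, u32 ring)
{
        /* the ring should be disabled while its base address is changed */
        rdm_rx_desc_en_set(aq_hw, 0U, ring);

        /* split the 64-bit DMA address into the two 32-bit registers */
        reg_rx_dma_desc_base_addresslswset(aq_hw, (u32)base, ring);
        reg_rx_dma_desc_base_addressmswset(aq_hw, (u32)((u64)base >> 32),
                ring);

        rdm_rx_desc_len_set(aq_hw, len_field, ring);

        rdm_rx_desc_en_set(aq_hw, 1U, ring);
}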
/* rpb */

/* set dma system loopback */
void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);

/* set rx traffic class mode */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
        u32 rx_traf_class_mode);

/* set rx buffer enable */
void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);

/* set rx buffer high threshold (per tc) */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
        u32 rx_buff_hi_threshold_per_tc,
        u32 buffer);

/* set rx buffer low threshold (per tc) */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
        u32 rx_buff_lo_threshold_per_tc,
        u32 buffer);

/* set rx flow control mode */
void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);

/* set rx packet buffer size (per tc) */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
        u32 rx_pkt_buff_size_per_tc,
        u32 buffer);

/* set rx xoff enable (per tc) */
void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
        u32 buffer);

/* rpf */

/* set l2 broadcast count threshold */
void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
        u32 l2broadcast_count_threshold);

/* set l2 broadcast enable */
void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);

/* set l2 broadcast filter action */
void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
        u32 l2broadcast_flr_act);

/* set l2 multicast filter enable */
void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
        u32 filter);

/* set l2 promiscuous mode enable */
void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
        u32 l2promiscuous_mode_en);

/* set l2 unicast filter action */
void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
        u32 filter);

/* set l2 unicast filter enable */
void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
        u32 filter);

/* set l2 unicast destination address lsw */
void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
        u32 l2unicast_dest_addresslsw,
        u32 filter);

/* set l2 unicast destination address msw */
void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
        u32 l2unicast_dest_addressmsw,
        u32 filter);

/* set l2 accept all multicast packets */
void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
        u32 l2_accept_all_mc_packets);
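/* Illustrative sketch (not part of the original API): one plausible way to
 * program a unicast MAC filter entry from a six-byte address, assuming the
 * msw register holds the two most-significant octets, the lsw register the
 * remaining four, and that a filter action of 1 means "deliver to host".
 * Those assumptions and the helper name are hypothetical.
 */
static inline void hw_atl_llh_example_uc_flr_set(struct aq_hw_s *aq_hw,
        const u8 *mac, u32 filter)
{
        u32 addr_msw = ((u32)mac[0] << 8) | mac[1];
        u32 addr_lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
                       ((u32)mac[4] << 8) | mac[5];

        /* disable the entry while it is being rewritten */
        rpfl2_uc_flr_en_set(aq_hw, 0U, filter);
        rpfl2unicast_dest_addresslsw_set(aq_hw, addr_lsw, filter);
        rpfl2unicast_dest_addressmsw_set(aq_hw, addr_msw, filter);
        rpfl2unicast_flr_act_set(aq_hw, 1U, filter);
        rpfl2_uc_flr_en_set(aq_hw, 1U, filter);
}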
/* set user-priority tc mapping */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
        u32 user_priority_tc_map, u32 tc);

/* set rss key address */
void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);

/* set rss key write data */
void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);

/* get rss key write enable */
u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);

/* set rss key write enable */
void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);

/* set rss redirection table address */
void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
        u32 rss_redir_tbl_addr);

/* set rss redirection table write data */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
        u32 rss_redir_tbl_wr_data);

/* get rss redirection write enable */
u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);

/* set rss redirection write enable */
void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
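/* Illustrative sketch (not part of the original API): the RSS key appears to
 * be programmed indirectly, one word at a time, through the address, data,
 * and write-enable registers above.  A plausible sequence, polling the
 * write-enable bit until the hardware clears it, is shown below; the helper
 * name, the polling bound, and the exact meaning of the address field are
 * assumptions.
 */
static inline int hw_atl_llh_example_rss_key_word_set(struct aq_hw_s *aq_hw,
        u32 addr, u32 data)
{
        u32 retries = 1000U;

        rpf_rss_key_wr_data_set(aq_hw, data);
        rpf_rss_key_addr_set(aq_hw, addr);
        rpf_rss_key_wr_en_set(aq_hw, 1U);

        /* wait for the hardware to consume the write */
        while (rpf_rss_key_wr_en_get(aq_hw) && --retries)
                ;

        return retries ? 0 : -1;
}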
/* set tpo to rpf system loopback */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
        u32 tpo_to_rpf_sys_lbk);

/* set vlan inner ethertype */
void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);

/* set vlan outer ethertype */
void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);

/* set vlan promiscuous mode enable */
void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);

/* set vlan untagged action */
void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);

/* set vlan accept untagged packets */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
        u32 vlan_accept_untagged_packets);

/* set vlan filter enable */
void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);

/* set vlan filter action */
void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
        u32 filter);

/* set vlan id filter */
void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);

/* set ethertype filter enable */
void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);

/* set ethertype user-priority enable */
void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
        u32 etht_user_priority_en, u32 filter);

/* set ethertype rx queue enable */
void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
        u32 filter);

/* set ethertype rx queue */
void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
        u32 filter);

/* set ethertype user-priority */
void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
        u32 filter);

/* set ethertype management queue */
void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
        u32 filter);

/* set ethertype filter action */
void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
        u32 filter);

/* set ethertype filter */
void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);

/* rpo */

/* set ipv4 header checksum offload enable */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
        u32 ipv4header_crc_offload_en);

/* set rx descriptor vlan stripping */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
        u32 rx_desc_vlan_stripping,
        u32 descriptor);

/* set tcp/udp checksum offload enable */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
        u32 tcp_udp_crc_offload_en);

/* set lro patch optimization enable */
void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
        u32 lro_patch_optimization_en);

/* set large receive offload enable */
void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);

/* set lro q sessions limit */
void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);

/* set lro total descriptor limit */
void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);

/* set lro min payload of first packet */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
        u32 lro_min_pld_of_first_pkt);

/* set lro packet limit */
void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);

/* set lro max number of descriptors */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
        u32 lro_max_desc_num, u32 lro);

/* set lro time base divider */
void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
        u32 lro_time_base_divider);

/* set lro inactive interval */
void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
        u32 lro_inactive_interval);

/* set lro max coalescing interval */
void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
        u32 lro_max_coalescing_interval);

/* rx */

/* set rx register reset disable */
void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);

/* tdm */

/* set cpu id */
void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);

/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
        u32 large_send_offload_en);

/* set tx descriptor enable */
void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);

/* set tx dca enable */
void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);

/* set tx dca mode */
void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);

/* set tx descriptor dca enable */
void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);

/* get tx descriptor head pointer */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set tx descriptor length */
void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
        u32 descriptor);

/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
        u32 tx_desc_wr_wb_irq_en);

/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
        u32 tx_desc_wr_wb_threshold,
        u32 descriptor);

/* set tdm interrupt moderation enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
        u32 tdm_irq_moderation_en);

/* thm */

/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
        u32 lso_tcp_flag_of_first_pkt);

/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
        u32 lso_tcp_flag_of_last_pkt);

/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
        u32 lso_tcp_flag_of_middle_pkt);

/* tpb */

/* set tx buffer enable */
void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);

/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
        u32 tx_buff_hi_threshold_per_tc,
        u32 buffer);

/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
        u32 tx_buff_lo_threshold_per_tc,
        u32 buffer);

/* set tx dma system loopback enable */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);

/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_buff_size_per_tc, u32 buffer);

/* set tx path pad insert enable */
void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);

/* tpo */

/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
        u32 ipv4header_crc_offload_en);

/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
        u32 tcp_udp_crc_offload_en);

/* set tx pkt system loopback enable */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
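/* Illustrative sketch (not part of the original API): the rpo_* and tpo_*
 * checksum-offload enables mirror each other for the receive and transmit
 * paths, so hardware checksumming is typically switched by writing the same
 * flag to both sides.  The helper name is hypothetical and the flag is
 * assumed to be a plain 0/1 enable.
 */
static inline void hw_atl_llh_example_csum_offload_set(struct aq_hw_s *aq_hw,
        u32 en)
{
        /* receive path */
        rpo_ipv4header_crc_offload_en_set(aq_hw, en);
        rpo_tcp_udp_crc_offload_en_set(aq_hw, en);

        /* transmit path */
        tpo_ipv4header_crc_offload_en_set(aq_hw, en);
        tpo_tcp_udp_crc_offload_en_set(aq_hw, en);
}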
/* tps */

/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_data_arb_mode);

/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
        u32 curr_time_res);

/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_desc_rate_lim);

/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_desc_tc_arb_mode);

/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_desc_tc_max_credit,
        u32 tc);

/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_desc_tc_weight,
        u32 tc);

/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_desc_vm_arb_mode);

/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_tc_data_max_credit,
        u32 tc);

/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
        u32 tx_pkt_shed_tc_data_weight,
        u32 tc);

/* tx */

/* set tx register reset disable */
void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);

/* msm */

/* get register access status */
u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);

/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
        u32 reg_addr_for_indirect_addr);

/* set register read strobe */
void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);

/* get register read data */
u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);

/* set register write data */
void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);

/* set register write strobe */
void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
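/* Illustrative sketch (not part of the original API): MSM registers are
 * reached indirectly through the address, strobe, status, and data registers
 * above.  A plausible read sequence is to program the target address, pulse
 * the read strobe, and poll the access-status register until the access has
 * completed before fetching the data word.  The helper name, the polling
 * bound, and the status polarity (non-zero meaning "busy") are assumptions.
 */
static inline int hw_atl_llh_example_msm_reg_read(struct aq_hw_s *aq_hw,
        u32 addr, u32 *val)
{
        u32 retries = 1000U;

        msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
        msm_reg_rd_strobe_set(aq_hw, 1U);

        /* wait for the indirect access to finish */
        while (msm_reg_access_status_get(aq_hw) && --retries)
                ;

        if (!retries)
                return -1;

        *val = msm_reg_rd_data_get(aq_hw);
        return 0;
}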
/* pci */

/* set pci register reset disable */
void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);

#endif /* HW_ATL_LLH_H */