p_hwfn references under /drivers/net/ethernet/qlogic/qed/:

qed_dev.c
  qed_hw_bar_size():
     51  static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
     57  if (IS_VF(p_hwfn->cdev))
     60  val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
     65  if (p_hwfn->cdev->num_hwfns > 1) {
     66  DP_INFO(p_hwfn,
     70  DP_INFO(p_hwfn,
  qed_init_dp():
     83  struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
     85  p_hwfn->dp_level = dp_level;
     86  p_hwfn->dp_module = dp_module;
  qed_init_struct():
     95  struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  [all …]

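A pattern that recurs throughout this listing: a qed_dev (cdev) owns one qed_hwfn per hardware function, most helpers take the p_hwfn as their first argument, and IS_VF() selects the VF path. A minimal kernel-style sketch of walking the hardware functions, assuming only what the qed_dev.c excerpt above shows (cdev->num_hwfns, cdev->hwfns[], IS_VF(), DP_INFO()) and the usual qed headers; qed_example_walk_hwfns() is a hypothetical helper name:

    static void qed_example_walk_hwfns(struct qed_dev *cdev)
    {
            int i;

            /* Walk every hardware function owned by this device, as
             * qed_init_dp() does in the excerpt above.
             */
            for (i = 0; i < cdev->num_hwfns; i++) {
                    struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                    /* VFs take a separate path, as in qed_hw_bar_size(). */
                    if (IS_VF(p_hwfn->cdev))
                            continue;

                    DP_INFO(p_hwfn, "hwfn %d is a PF\n", i);
            }
    }
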
qed_mcp.c
     38  DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
  qed_mcp_is_init():
     50  bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
     52  if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
  qed_mcp_cmd_port_init():
     57  void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
     59  u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
     61  u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
     63  p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
     64  MFW_PORT(p_hwfn));
     65  DP_VERBOSE(p_hwfn, QED_MSG_SP,
     67  p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
  [all …]

qed_hw.c
  qed_ptt_pool_alloc():
     45  int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
     63  p_hwfn->p_ptt_pool = p_pool;
  qed_ptt_invalidate():
     69  void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
     75  p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
  qed_ptt_pool_free():
     80  void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
     82  kfree(p_hwfn->p_ptt_pool);
     83  p_hwfn->p_ptt_pool = NULL;
  qed_ptt_acquire():
     86  struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
     93  spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
     95  if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
  [all …]

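The qed_hw.c excerpt above, together with the qed_selftest.c and qed_dev_api.h entries further down, shows the PTT lifecycle: acquire a window from the per-hwfn pool, use it for register access, then release it. A minimal sketch of that pattern, assuming only the calls visible in this listing (qed_ptt_acquire(), qed_rd(), qed_ptt_release(), DP_ERR()); the helper name and the -EBUSY return value are assumptions:

    static int qed_example_read_reg(struct qed_hwfn *p_hwfn, u32 addr, u32 *p_val)
    {
            struct qed_ptt *p_ptt;

            /* A PTT window must be held for every register access. */
            p_ptt = qed_ptt_acquire(p_hwfn);
            if (!p_ptt) {
                    DP_ERR(p_hwfn, "failed to acquire ptt\n");
                    return -EBUSY;          /* error code is an assumption */
            }

            *p_val = qed_rd(p_hwfn, p_ptt, addr);

            qed_ptt_release(p_hwfn, p_ptt);
            return 0;
    }
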
qed_vf.c
  qed_vf_pf_prep():
     15  static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
     17  struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
     27  DP_VERBOSE(p_hwfn,
     40  p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
  qed_vf_pf_req_end():
     49  static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
     51  union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
     53  DP_VERBOSE(p_hwfn, QED_MSG_IOV,
     57  mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
  qed_send_msg2pf():
     60  static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
     62  union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
  [all …]

qed_init_fw_funcs.c
  qed_enable_pf_rl():
    121  static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
    123  STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
    126  STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
    129  STORE_RT_REG(p_hwfn,
    131  STORE_RT_REG(p_hwfn,
    136  STORE_RT_REG(p_hwfn,
  qed_enable_pf_wfq():
    143  static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
    145  STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
    148  STORE_RT_REG(p_hwfn,
  qed_enable_vport_rl():
    154  static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
  [all …]

qed_int.c
     49  #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
     50  ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
     72  int (*cb)(struct qed_hwfn *p_hwfn);
  qed_mcp_attn_cb():
   1294  static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
   1296  u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
   1299  DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
   1301  qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
  qed_pswhst_attn_cb():
   1320  static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
   1322  u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
   1328  addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
  [all …]

qed_roce.c
  qed_async_roce_event():
     69  void qed_async_roce_event(struct qed_hwfn *p_hwfn,
     72  struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
  qed_rdma_bmap_alloc():
     78  static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
     81  DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
     88  DP_NOTICE(p_hwfn,
     93  DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
  qed_rdma_bmap_alloc_id():
     98  static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
    101  DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
    106  DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
  qed_bmap_release_id():
    116  static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
  [all …]

qed_cxt.c
     90  #define CONN_CXT_SIZE(p_hwfn) \
     91  ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
     95  #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
     96  ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
     99  #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
  qed_cxt_tm_iids():
    274  static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
  qed_cxt_qm_iids():
    316  static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
    319  struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    345  DP_VERBOSE(p_hwfn, QED_MSG_ILT,
  qed_cxt_tid_seg_info():
    350  static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
  [all …]

qed_spq.c
  qed_spq_blocking_cb():
     43  static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
  qed_spq_block():
     58  static int qed_spq_block(struct qed_hwfn *p_hwfn,
     79  DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
     80  rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
     82  DP_NOTICE(p_hwfn, "MCP drain failed\n");
    104  DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
  qed_spq_fill_entry():
    112  static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
    125  DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
    130  DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
  qed_spq_hw_initialize():
    147  static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
  [all …]

qed_sriov.c
  qed_sp_vf_start():
     24  static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
     34  init_data.cid = qed_spq_get_cid(p_hwfn);
     38  rc = qed_sp_init_request(p_hwfn, &p_ent,
     49  switch (p_hwfn->hw_info.personality) {
     57  DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
     58  p_hwfn->hw_info.personality);
     65  DP_VERBOSE(p_hwfn,
     77  DP_VERBOSE(p_hwfn, QED_MSG_IOV,
     81  return qed_spq_post(p_hwfn, p_ent, NULL);
  qed_sp_vf_stop():
     84  static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
  [all …]

qed_init_ops.c
  qed_init_clear_rt_data():
     54  void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
     59  p_hwfn->rt_data.b_valid[i] = false;
  qed_init_store_rt_reg():
     62  void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
     64  p_hwfn->rt_data.init_val[rt_offset] = val;
     65  p_hwfn->rt_data.b_valid[rt_offset] = true;
  qed_init_store_rt_agg():
     68  void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
     74  p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
     75  p_hwfn->rt_data.b_valid[rt_offset + i] = true;
  qed_init_rt():
     79  static int qed_init_rt(struct qed_hwfn *p_hwfn,
     83  u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
  [all …]

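The qed_init_ops.c excerpt above shows that runtime ("RT") register values are not written to hardware immediately: qed_init_store_rt_reg() records the value in p_hwfn->rt_data.init_val[] and marks the slot valid, and the staged values are consumed later by the init code (qed_init_rt() in the same excerpt). A minimal sketch of staging one such value, reusing the QM_REG_WFQPFENABLE_RT_OFFSET constant from the qed_init_fw_funcs.c excerpt; the wrapper name is hypothetical:

    static void qed_example_stage_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
    {
            /* Only records the value in p_hwfn->rt_data; nothing touches the
             * hardware until the runtime array is flushed by the init code.
             */
            qed_init_store_rt_reg(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET,
                                  pf_wfq_en ? 1 : 0);
    }
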
qed_sp_commands.c
  qed_sp_init_request():
     26  int qed_sp_init_request(struct qed_hwfn *p_hwfn,
     37  rc = qed_spq_get_entry(p_hwfn, pp_ent);
     72  DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
     77  DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
  qed_tunn_set_pf_fix_tunn_mode():
    107  qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
    111  unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
  qed_tunn_set_pf_update_params():
    166  qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
    173  qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
  qed_set_hw_tunn_mode():
    219  static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
    235  qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
  [all …]

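The qed_sriov.c and qed_l2.c excerpts in this listing suggest the common slow-path flow built on these helpers: take a CID from qed_spq_get_cid(), let qed_sp_init_request() allocate and prepare an SPQ entry, fill the command-specific ramrod data, and post it with qed_spq_post(). A sketch of that flow; the qed_sp_init_data field names, the QED_SPQ_MODE_EBLOCK completion mode, and the opaque_fid source are assumptions not confirmed by the excerpts:

    static int qed_example_post_ramrod(struct qed_hwfn *p_hwfn, u8 cmd, u8 protocol)
    {
            struct qed_spq_entry *p_ent = NULL;
            struct qed_sp_init_data init_data;
            int rc;

            memset(&init_data, 0, sizeof(init_data));
            init_data.cid = qed_spq_get_cid(p_hwfn);            /* as in qed_sp_vf_start() */
            init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;  /* assumption */
            init_data.comp_mode = QED_SPQ_MODE_EBLOCK;          /* assumption */

            rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
            if (rc)
                    return rc;

            /* ... fill p_ent->ramrod.<command> here ... */

            return qed_spq_post(p_hwfn, p_ent, NULL);
    }
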
qed_l2.c
  qed_sp_eth_vport_start():
     44  int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
     54  rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
     59  init_data.cid = qed_spq_get_cid(p_hwfn);
     63  rc = qed_sp_init_request(p_hwfn, &p_ent,
    108  p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
    111  return qed_spq_post(p_hwfn, p_ent, NULL);
  qed_sp_vport_start():
    114  static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
    117  if (IS_VF(p_hwfn->cdev)) {
    118  return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
    126  return qed_sp_eth_vport_start(p_hwfn, p_params);
  [all …]

qed_int.h
     68  void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
     82  void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
     92  void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
    103  u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
    122  int qed_int_sb_init(struct qed_hwfn *p_hwfn,
    135  void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
    152  int qed_int_sb_release(struct qed_hwfn *p_hwfn,
    174  void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
    193  #define SB_ALIGNED_SIZE(p_hwfn) \
    194  ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
  [all …]

qed_mcp.h
    177  *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
    188  int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
    202  int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
    236  int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
    250  int qed_mcp_drain(struct qed_hwfn *p_hwfn,
    262  int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
    277  qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
    290  int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
    302  int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
    313  int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
  [all …]

qed_cxt.h
     41  int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
     54  int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
     65  int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
     76  u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
     86  int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
     95  int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
    104  int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
    111  void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
    120  int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
    127  void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
  [all …]

qed_ll2.c
  qed_ll2b_complete_tx_packet():
     69  static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
     76  struct qed_dev *cdev = p_hwfn->cdev;
     80  dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
  qed_ll2b_complete_rx_packet():
    144  static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
    152  struct qed_dev *cdev = p_hwfn->cdev;
    165  DP_VERBOSE(p_hwfn,
    184  rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
  __qed_ll2_handle_sanity():
    230  static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
    240  if (!p_hwfn->p_ll2_info)
    243  p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
  [all …]

qed_debug.c
  qed_dbg_dev_init():
   1414  static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
   1417  struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
   1422  if (QED_IS_K2(p_hwfn->cdev)) {
   1425  } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
  qed_read_fw_info():
   1441  static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
   1460  dest[i] = qed_rd(p_hwfn, p_ptt, addr);
   1468  dest[i] = qed_rd(p_hwfn, p_ptt, addr);
  qed_dump_fw_ver_param():
   1553  static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
   1557  struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
   1575  qed_read_fw_info(p_hwfn,
  [all …]

qed_vf.h
    570  int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
    578  void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
    587  void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
    596  void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
    605  void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
    613  void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
    621  void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
    632  bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
    643  void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
    655  int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
  [all …]

qed_hw.h
     62  void qed_gtt_init(struct qed_hwfn *p_hwfn);
     69  void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
     78  int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
     85  void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
     95  u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
    115  void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
    127  struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
    138  void qed_wr(struct qed_hwfn *p_hwfn,
    151  u32 qed_rd(struct qed_hwfn *p_hwfn,
    165  void qed_memcpy_from(struct qed_hwfn *p_hwfn,
  [all …]

qed_sp.h
     44  int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
    187  int qed_spq_post(struct qed_hwfn *p_hwfn,
    198  int qed_spq_alloc(struct qed_hwfn *p_hwfn);
    205  void qed_spq_setup(struct qed_hwfn *p_hwfn);
    212  void qed_spq_free(struct qed_hwfn *p_hwfn);
    226  qed_spq_get_entry(struct qed_hwfn *p_hwfn,
    236  void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
    246  struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
    255  void qed_eq_setup(struct qed_hwfn *p_hwfn,
    264  void qed_eq_free(struct qed_hwfn *p_hwfn,
  [all …]

qed_sriov.h
     21  #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
     25  #define IS_PF_SRIOV(p_hwfn) (0)
     27  #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
    150  #define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
    217  u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
    227  int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
    239  void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
    247  void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
    256  int qed_iov_alloc(struct qed_hwfn *p_hwfn);
    264  void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  [all …]

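The two IS_PF_SRIOV() definitions above (lines 21 and 25) look like the usual kernel pattern of a real check in the SR-IOV-enabled build and a constant 0 when the feature is configured out, so callers can guard IOV-only code without #ifdefs. A minimal sketch of such a guard, assuming that reading of the macros; the helper name is hypothetical:

    static void qed_example_iov_only(struct qed_hwfn *p_hwfn)
    {
            /* Becomes if (!0), i.e. always returns, and the body is dropped
             * by the compiler when the (0) variant of the macro is in effect.
             */
            if (!IS_PF_SRIOV(p_hwfn))
                    return;

            /* PF-with-SR-IOV path, e.g. iterating VFs with
             * qed_iov_get_next_active_vf(p_hwfn, rel_vf_id).
             */
    }
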
qed_dcbx.c
  qed_dcbx_dp_protocol():
    147  qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
    152  DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
    158  DP_VERBOSE(p_hwfn, QED_MSG_DCB,
    162  p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
  qed_dcbx_update_app_info():
    194  struct qed_hwfn *p_hwfn,
    199  struct qed_hw_info *p_info = &p_hwfn->hw_info;
  qed_dcbx_get_app_protocol_type():
    220  qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
    236  DP_ERR(p_hwfn,
  qed_dcbx_process_tlv():
    249  qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
    261  DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
  [all …]

qed_selftest.c
  qed_selftest_register():
     35  struct qed_hwfn *p_hwfn;
     41  p_hwfn = &cdev->hwfns[i];
     42  p_ptt = qed_ptt_acquire(p_hwfn);
     44  DP_ERR(p_hwfn, "failed to acquire ptt\n");
     47  rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
     48  qed_ptt_release(p_hwfn, p_ptt);
  qed_selftest_clock():
     58  struct qed_hwfn *p_hwfn;
     64  p_hwfn = &cdev->hwfns[i];
     65  p_ptt = qed_ptt_acquire(p_hwfn);
     67  DP_ERR(p_hwfn, "failed to acquire ptt\n");
  [all …]

qed_dev_api.h
    117  void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
    156  struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
    168  void qed_ptt_release(struct qed_hwfn *p_hwfn,
    208  qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
    225  int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
    240  int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
    282  int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
    295  int qed_fw_vport(struct qed_hwfn *p_hwfn,
    308  int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
    319  int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
  [all …]