/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_table.c |
      6  int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,   in mlx5dr_table_set_miss_action() argument
     17  mlx5dr_domain_lock(tbl->dmn);   in mlx5dr_table_set_miss_action()
     19  if (!list_empty(&tbl->matcher_list))   in mlx5dr_table_set_miss_action()
     20  last_matcher = list_last_entry(&tbl->matcher_list,   in mlx5dr_table_set_miss_action()
     24  if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||   in mlx5dr_table_set_miss_action()
     25  tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {   in mlx5dr_table_set_miss_action()
     29  last_htbl = tbl->rx.s_anchor;   in mlx5dr_table_set_miss_action()
     31  tbl->rx.default_icm_addr = action ?   in mlx5dr_table_set_miss_action()
     32  action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :   in mlx5dr_table_set_miss_action()
     33  tbl->rx.nic_dmn->default_icm_addr;   in mlx5dr_table_set_miss_action()
         [all …]
|
D | dr_matcher.c |
    370  mlx5dr_dbg(matcher->tbl->dmn,   in mlx5dr_matcher_select_builders()
    384  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_set_ste_builders()
    706  struct mlx5dr_table *tbl = matcher->tbl;   in dr_matcher_add_to_tbl() local
    707  struct mlx5dr_domain *dmn = tbl->dmn;   in dr_matcher_add_to_tbl()
    712  list_for_each_entry(tmp_matcher, &tbl->matcher_list, list_node) {   in dr_matcher_add_to_tbl()
    724  prev_matcher = list_last_entry(&tbl->matcher_list,   in dr_matcher_add_to_tbl()
    752  list_add(&matcher->list_node, &tbl->matcher_list);   in dr_matcher_add_to_tbl()
    771  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_uninit()
    792  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_set_all_ste_builders()
    810  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_init_nic()
         [all …]
|
D | dr_rule.c |
     45  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_rule_create_collision_htbl()
     79  mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");   in dr_rule_create_collision_entry()
     92  mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");   in dr_rule_create_collision_entry()
    181  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_rule_rehash_handle_collision()
    238  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_rule_rehash_copy_ste()
    326  mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");   in dr_rule_rehash_copy_miss_list()
    345  mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");   in dr_rule_rehash_copy_htbl()
    377  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_rule_rehash_htbl()
    494  struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;   in dr_rule_rehash()
    515  struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_rule_handle_collision()
         [all …]
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_doorbell.c |
     56  struct pvrdma_id_table *tbl = &dev->uar_table.tbl;   in pvrdma_uar_table_init() local
     61  tbl->last = 0;   in pvrdma_uar_table_init()
     62  tbl->top = 0;   in pvrdma_uar_table_init()
     63  tbl->max = num;   in pvrdma_uar_table_init()
     64  tbl->mask = mask;   in pvrdma_uar_table_init()
     65  spin_lock_init(&tbl->lock);   in pvrdma_uar_table_init()
     66  tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);   in pvrdma_uar_table_init()
     67  if (!tbl->table)   in pvrdma_uar_table_init()
     71  set_bit(0, tbl->table);   in pvrdma_uar_table_init()
     78  struct pvrdma_id_table *tbl = &dev->uar_table.tbl;   in pvrdma_uar_table_cleanup() local
         [all …]
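The pvrdma_uar_table_init() lines above show a common kernel pattern: an ID table backed by a bitmap sized with BITS_TO_LONGS(), with bit 0 reserved for a default resource. Below is a minimal userspace analogue of that pattern; the id_table type, its field names, and the table size are illustrative stand-ins, not the pvrdma driver's definitions.

    /* Userspace sketch of a bitmap-backed ID table in the spirit of
     * pvrdma_uar_table_init(); names and sizes here are illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct id_table {
        unsigned int max;
        unsigned long *table;   /* one bit per allocatable ID */
    };

    static int id_table_init(struct id_table *tbl, unsigned int num)
    {
        tbl->max = num;
        tbl->table = calloc(BITS_TO_LONGS(num), sizeof(long));
        if (!tbl->table)
            return -1;
        tbl->table[0] |= 1UL;   /* reserve ID 0, as the driver does for UAR 0 */
        return 0;
    }

    static int id_table_alloc(struct id_table *tbl)
    {
        for (unsigned int id = 0; id < tbl->max; id++) {
            unsigned long bit = 1UL << (id % BITS_PER_LONG);

            if (!(tbl->table[id / BITS_PER_LONG] & bit)) {
                tbl->table[id / BITS_PER_LONG] |= bit;
                return (int)id;
            }
        }
        return -1;   /* table full */
    }

    int main(void)
    {
        struct id_table tbl;

        if (id_table_init(&tbl, 64))
            return 1;
        printf("first free id: %d\n", id_table_alloc(&tbl));   /* prints 1 */
        free(tbl.table);
        return 0;
    }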
|
/drivers/net/wireless/intel/iwlwifi/dvm/ |
D | rs.c |
    436  static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)   in get_expected_tpt() argument
    438  if (tbl->expected_tpt)   in get_expected_tpt()
    439  return tbl->expected_tpt[rs_index];   in get_expected_tpt()
    450  static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,   in rs_collect_tx_data() argument
    461  window = &(tbl->win[scale_index]);   in rs_collect_tx_data()
    464  tpt = get_expected_tpt(tbl, scale_index);   in rs_collect_tx_data()
    529  struct iwl_scale_tbl_info *tbl,   in rate_n_flags_from_tbl() argument
    534  if (is_legacy(tbl->lq_type)) {   in rate_n_flags_from_tbl()
    539  } else if (is_Ht(tbl->lq_type)) {   in rate_n_flags_from_tbl()
    546  if (is_siso(tbl->lq_type))   in rate_n_flags_from_tbl()
         [all …]
|
D | rs.h |
    261  #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))   argument
    262  #define is_siso(tbl) ((tbl) == LQ_SISO)   argument
    263  #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)   argument
    264  #define is_mimo3(tbl) ((tbl) == LQ_MIMO3)   argument
    265  #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))   argument
    266  #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))   argument
    267  #define is_a_band(tbl) ((tbl) == LQ_A)   argument
    268  #define is_g_and(tbl) ((tbl) == LQ_G)   argument
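The rs.h macros above classify a table's lq_type into legacy, SISO, MIMO2/MIMO3 and HT groups. A small standalone sketch follows; the enum values are placeholders for illustration, since the real driver defines its own ordering for LQ_G, LQ_A, LQ_SISO, LQ_MIMO2 and LQ_MIMO3.

    /* Standalone sketch of the rs.h classification macros shown above.
     * The enum values below are placeholders; the driver defines the real
     * enum with its own ordering. */
    #include <stdio.h>

    enum { LQ_NONE, LQ_G, LQ_A, LQ_SISO, LQ_MIMO2, LQ_MIMO3 };

    #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
    #define is_siso(tbl)   ((tbl) == LQ_SISO)
    #define is_mimo2(tbl)  ((tbl) == LQ_MIMO2)
    #define is_mimo3(tbl)  ((tbl) == LQ_MIMO3)
    #define is_mimo(tbl)   (is_mimo2(tbl) || is_mimo3(tbl))
    #define is_Ht(tbl)     (is_siso(tbl) || is_mimo(tbl))

    int main(void)
    {
        int t = LQ_MIMO2;

        /* A MIMO2 table is HT and MIMO, but not legacy. */
        printf("legacy=%d ht=%d mimo=%d\n", is_legacy(t), is_Ht(t), is_mimo(t));
        return 0;
    }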
|
/drivers/net/wireless/intel/iwlegacy/ |
D | 4965-rs.c |
    389  il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)   in il4965_get_expected_tpt() argument
    391  if (tbl->expected_tpt)   in il4965_get_expected_tpt()
    392  return tbl->expected_tpt[rs_idx];   in il4965_get_expected_tpt()
    404  il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,   in il4965_rs_collect_tx_data() argument
    415  win = &(tbl->win[scale_idx]);   in il4965_rs_collect_tx_data()
    418  tpt = il4965_get_expected_tpt(tbl, scale_idx);   in il4965_rs_collect_tx_data()
    482  il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,   in il4965_rate_n_flags_from_tbl() argument
    487  if (is_legacy(tbl->lq_type)) {   in il4965_rate_n_flags_from_tbl()
    492  } else if (is_Ht(tbl->lq_type)) {   in il4965_rate_n_flags_from_tbl()
    499  if (is_siso(tbl->lq_type))   in il4965_rate_n_flags_from_tbl()
         [all …]
|
/drivers/net/wireless/marvell/mwifiex/ |
D | 11n_rxreorder.c |
    110  struct mwifiex_rx_reorder_tbl *tbl,   in mwifiex_11n_dispatch_pkt_until_start_win() argument
    120  pkt_to_send = (start_win > tbl->start_win) ?   in mwifiex_11n_dispatch_pkt_until_start_win()
    121  min((start_win - tbl->start_win), tbl->win_size) :   in mwifiex_11n_dispatch_pkt_until_start_win()
    122  tbl->win_size;   in mwifiex_11n_dispatch_pkt_until_start_win()
    125  if (tbl->rx_reorder_ptr[i]) {   in mwifiex_11n_dispatch_pkt_until_start_win()
    126  skb = tbl->rx_reorder_ptr[i];   in mwifiex_11n_dispatch_pkt_until_start_win()
    128  tbl->rx_reorder_ptr[i] = NULL;   in mwifiex_11n_dispatch_pkt_until_start_win()
    136  for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {   in mwifiex_11n_dispatch_pkt_until_start_win()
    137  tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];   in mwifiex_11n_dispatch_pkt_until_start_win()
    138  tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;   in mwifiex_11n_dispatch_pkt_until_start_win()
         [all …]
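The mwifiex_11n_dispatch_pkt_until_start_win() lines above compute how many buffered frames to deliver (bounded by the window size), deliver them, then slide the remaining rx_reorder_ptr slots to the front of the window. A minimal userspace sketch of that shift follows, with placeholder types and without the driver's sequence-number wraparound handling.

    /* Minimal sketch of the reorder-window shift shown above; the struct
     * and WIN_SIZE are stand-ins, not the mwifiex definitions. */
    #include <stdio.h>

    #define WIN_SIZE 8

    struct reorder_tbl {
        unsigned int start_win;
        unsigned int win_size;
        void *rx_reorder_ptr[WIN_SIZE];
    };

    static void dispatch_until_start_win(struct reorder_tbl *tbl,
                                         unsigned int start_win)
    {
        unsigned int pkt_to_send, i;

        /* Same bound the driver computes: never shift past the window. */
        pkt_to_send = (start_win > tbl->start_win) ?
            ((start_win - tbl->start_win < tbl->win_size) ?
              start_win - tbl->start_win : tbl->win_size) :
            tbl->win_size;

        for (i = 0; i < pkt_to_send; i++) {
            if (tbl->rx_reorder_ptr[i])
                printf("deliver frame buffered at slot %u\n", i);
            tbl->rx_reorder_ptr[i] = NULL;
        }

        /* Slide the remaining buffered frames to the front of the window. */
        for (i = 0; i < tbl->win_size - pkt_to_send; i++) {
            tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
            tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
        }
        tbl->start_win = start_win;
    }

    int main(void)
    {
        struct reorder_tbl tbl = { .start_win = 10, .win_size = WIN_SIZE };
        int dummy = 1;

        tbl.rx_reorder_ptr[2] = &dummy;        /* an out-of-order frame */
        dispatch_until_start_win(&tbl, 14);    /* advances the window by 4 */
        return 0;
    }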
|
/drivers/vfio/ |
D | vfio_iommu_spapr_tce.c |
    207  struct iommu_table *tbl = container->tables[i];   in tce_iommu_find_table() local
    209  if (tbl) {   in tce_iommu_find_table()
    210  unsigned long entry = ioba >> tbl->it_page_shift;   in tce_iommu_find_table()
    211  unsigned long start = tbl->it_offset;   in tce_iommu_find_table()
    212  unsigned long end = start + tbl->it_size;   in tce_iommu_find_table()
    215  *ptbl = tbl;   in tce_iommu_find_table()
    337  struct iommu_table *tbl,
    340  struct iommu_table *tbl);
    360  struct iommu_table *tbl = container->tables[i];   in tce_iommu_release() local
    362  if (!tbl)   in tce_iommu_release()
         [all …]
|
/drivers/firmware/efi/ |
D | memattr.c |
     24  efi_memory_attributes_table_t *tbl;   in efi_memattr_init() local
     29  tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));   in efi_memattr_init()
     30  if (!tbl) {   in efi_memattr_init()
     36  if (tbl->version > 2) {   in efi_memattr_init()
     38  tbl->version);   in efi_memattr_init()
     42  tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;   in efi_memattr_init()
     47  early_memunmap(tbl, sizeof(*tbl));   in efi_memattr_init()
    131  efi_memory_attributes_table_t *tbl;   in efi_memattr_apply_permissions() local
    134  if (tbl_size <= sizeof(*tbl))   in efi_memattr_apply_permissions()
    146  tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);   in efi_memattr_apply_permissions()
         [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | mod_hdr.c |
     42  void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)   in mlx5e_mod_hdr_tbl_init() argument
     44  mutex_init(&tbl->lock);   in mlx5e_mod_hdr_tbl_init()
     45  hash_init(tbl->hlist);   in mlx5e_mod_hdr_tbl_init()
     48  void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)   in mlx5e_mod_hdr_tbl_destroy() argument
     50  mutex_destroy(&tbl->lock);   in mlx5e_mod_hdr_tbl_destroy()
     53  static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl,   in mod_hdr_get() argument
     59  hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {   in mod_hdr_get()
     72  struct mod_hdr_tbl *tbl,   in mlx5e_mod_hdr_attach() argument
     89  mutex_lock(&tbl->lock);   in mlx5e_mod_hdr_attach()
     90  mh = mod_hdr_get(tbl, &key, hash_key);   in mlx5e_mod_hdr_attach()
         [all …]
|
D | mod_hdr.h |
     25  struct mod_hdr_tbl *tbl,
     29  struct mod_hdr_tbl *tbl,
     33  void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
     34  void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
|
/drivers/infiniband/hw/bnxt_re/ |
D | qplib_res.c |
    421  struct bnxt_qplib_hwq *tbl;   in bnxt_qplib_map_tqm_pgtbl() local
    432  tbl = &ctx->qtbl[i];   in bnxt_qplib_map_tqm_pgtbl()
    433  if (!tbl->max_elements)   in bnxt_qplib_map_tqm_pgtbl()
    437  switch (tbl->level) {   in bnxt_qplib_map_tqm_pgtbl()
    439  pg_count = tbl->pbl[PBL_LVL_1].pg_count;   in bnxt_qplib_map_tqm_pgtbl()
    442  dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];   in bnxt_qplib_map_tqm_pgtbl()
    450  *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |   in bnxt_qplib_map_tqm_pgtbl()
    586  kfree(sgid_tbl->tbl);   in bnxt_qplib_free_sgid_tbl()
    590  sgid_tbl->tbl = NULL;   in bnxt_qplib_free_sgid_tbl()
    602  sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);   in bnxt_qplib_alloc_sgid_tbl()
         [all …]
|
/drivers/char/tpm/eventlog/ |
D | acpi.c |
     73  struct acpi_table_tpm2 *tbl;   in tpm_read_log_acpi() local
     88  (struct acpi_table_header **)&tbl);   in tpm_read_log_acpi()
     92  if (tbl->header.length <   in tpm_read_log_acpi()
     93  sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {   in tpm_read_log_acpi()
     94  acpi_put_table((struct acpi_table_header *)tbl);   in tpm_read_log_acpi()
     98  tpm2_phy = (void *)tbl + sizeof(*tbl);   in tpm_read_log_acpi()
    103  acpi_put_table((struct acpi_table_header *)tbl);   in tpm_read_log_acpi()
    107  acpi_put_table((struct acpi_table_header *)tbl);   in tpm_read_log_acpi()
|
/drivers/staging/media/hantro/ |
D | hantro_h264.c |
    204  struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;   in assemble_scaling_list() local
    205  u32 *dst = (u32 *)tbl->scaling_list;   in assemble_scaling_list()
    230  struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;   in prepare_table() local
    237  tbl->poc[i * 2] = dpb[i].top_field_order_cnt;   in prepare_table()
    238  tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;   in prepare_table()
    252  tbl->poc[32] = dec_param->top_field_order_cnt;   in prepare_table()
    253  tbl->poc[33] = dec_param->bottom_field_order_cnt;   in prepare_table()
    417  struct hantro_h264_dec_priv_tbl *tbl;   in hantro_h264_dec_init() local
    419  priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,   in hantro_h264_dec_init()
    424  priv->size = sizeof(*tbl);   in hantro_h264_dec_init()
         [all …]
|
/drivers/net/wireless/intel/iwlwifi/mvm/ |
D | rs.c |
    584  struct iwl_scale_tbl_info *tbl)   in rs_rate_scale_clear_tbl_windows() argument
    590  rs_rate_scale_clear_window(&tbl->win[i]);   in rs_rate_scale_clear_tbl_windows()
    592  for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)   in rs_rate_scale_clear_tbl_windows()
    593  rs_rate_scale_clear_window(&tbl->tpc_win[i]);   in rs_rate_scale_clear_tbl_windows()
    665  static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)   in get_expected_tpt() argument
    667  if (tbl->expected_tpt)   in get_expected_tpt()
    668  return tbl->expected_tpt[rs_index];   in get_expected_tpt()
    680  struct iwl_scale_tbl_info *tbl,   in _rs_collect_tx_data() argument
    688  tpt = get_expected_tpt(tbl, scale_index);   in _rs_collect_tx_data()
    746  struct iwl_scale_tbl_info *tbl,   in rs_collect_tpc_data() argument
         [all …]
|
/drivers/clk/tegra/ |
D | clk.c |
    264  void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,   in tegra_init_from_table() argument
    269  for (; tbl->clk_id < clk_max; tbl++) {   in tegra_init_from_table()
    270  clk = clks[tbl->clk_id];   in tegra_init_from_table()
    273  __func__, PTR_ERR(clk), tbl->clk_id);   in tegra_init_from_table()
    279  if (tbl->parent_id < clk_max) {   in tegra_init_from_table()
    280  struct clk *parent = clks[tbl->parent_id];   in tegra_init_from_table()
    289  if (tbl->rate)   in tegra_init_from_table()
    290  if (clk_set_rate(clk, tbl->rate)) {   in tegra_init_from_table()
    292  __func__, tbl->rate,   in tegra_init_from_table()
    297  if (tbl->state)   in tegra_init_from_table()
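tegra_init_from_table() above walks an array of init entries terminated by the first clk_id >= clk_max, optionally reparenting each clock, setting its rate and enabling it. The sketch below shows the same table-driven walk with a sentinel terminator; the entry layout, clock names and plain printf actions are stand-ins for the Tegra clk API, not its real types.

    /* Table-driven init in the spirit of tegra_init_from_table(); all names
     * and the printf "actions" below are illustrative only. */
    #include <stdio.h>

    #define CLK_MAX 4

    struct clk_init_entry {
        unsigned int clk_id;
        unsigned int parent_id;
        unsigned long rate;
        int state;              /* non-zero: enable the clock */
    };

    static const char *clk_names[CLK_MAX] = { "pll_p", "uart_a", "sdmmc1", "i2c1" };

    static void init_from_table(const struct clk_init_entry *tbl)
    {
        /* The walk stops at the first entry whose clk_id is out of range. */
        for (; tbl->clk_id < CLK_MAX; tbl++) {
            printf("clk %s:", clk_names[tbl->clk_id]);
            if (tbl->parent_id < CLK_MAX)
                printf(" parent=%s", clk_names[tbl->parent_id]);
            if (tbl->rate)
                printf(" rate=%lu", tbl->rate);
            if (tbl->state)
                printf(" enabled");
            printf("\n");
        }
    }

    int main(void)
    {
        static const struct clk_init_entry tbl[] = {
            { 1, 0, 1843200, 1 },           /* uart_a from pll_p, enabled */
            { 2, CLK_MAX, 48000000, 0 },    /* sdmmc1, rate only */
            { CLK_MAX, CLK_MAX, 0, 0 },     /* sentinel terminates the walk */
        };

        init_from_table(tbl);
        return 0;
    }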
|
/drivers/media/usb/gspca/gl860/ |
D | gl860.c |
    580  int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len)   in fetch_validx() argument
    585  if (tbl[n].idx != 0xffff)   in fetch_validx()
    586  ctrl_out(gspca_dev, 0x40, 1, tbl[n].val,   in fetch_validx()
    587  tbl[n].idx, 0, NULL);   in fetch_validx()
    588  else if (tbl[n].val == 0xffff)   in fetch_validx()
    591  msleep(tbl[n].val);   in fetch_validx()
    596  int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl,   in keep_on_fetching_validx() argument
    600  if (tbl[n].idx != 0xffff)   in keep_on_fetching_validx()
    601  ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx,   in keep_on_fetching_validx()
    603  else if (tbl[n].val == 0xffff)   in keep_on_fetching_validx()
         [all …]
|
/drivers/gpu/drm/amd/display/dc/bios/ |
D | bios_parser.c |
    169  ATOM_OBJECT_TABLE *tbl =   in bios_parser_get_connector_id() local
    172  if (!tbl) {   in bios_parser_get_connector_id()
    177  if (tbl->ucNumberOfObjects <= i) {   in bios_parser_get_connector_id()
    179  i, tbl->ucNumberOfObjects);   in bios_parser_get_connector_id()
    183  id = le16_to_cpu(tbl->asObjects[i].usObjectID);   in bios_parser_get_connector_id()
    654  ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;   in get_ss_info_v3_1() local
    672  tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)   in get_ss_info_v3_1()
    678  if (tbl[i].ucClockIndication != (uint8_t) id)   in get_ss_info_v3_1()
    697  if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)   in get_ss_info_v3_1()
    700  if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)   in get_ss_info_v3_1()
         [all …]
|
/drivers/iio/imu/bmi160/ |
D | bmi160_core.c |
    212  const struct bmi160_scale *tbl;   member
    218  .tbl = bmi160_accel_scale,
    222  .tbl = bmi160_gyro_scale,
    254  const struct bmi160_odr *tbl;   member
    260  .tbl = bmi160_accel_odr,
    264  .tbl = bmi160_gyro_odr,
    333  if (bmi160_scale_table[t].tbl[i].uscale == uscale)   in bmi160_set_scale()
    340  bmi160_scale_table[t].tbl[i].bits);   in bmi160_set_scale()
    354  if (bmi160_scale_table[t].tbl[i].bits == val) {   in bmi160_get_scale()
    355  *uscale = bmi160_scale_table[t].tbl[i].uscale;   in bmi160_get_scale()
         [all …]
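bmi160_set_scale()/bmi160_get_scale() above look up a per-sensor table of {uscale, register bits} pairs in both directions: by uscale when writing the range register and by register bits when reading the scale back. A minimal sketch of that two-way lookup follows; the table values are illustrative and not guaranteed to match the real BMI160 register encodings.

    /* Sketch of the {uscale, register bits} lookup pattern shown above;
     * the values in accel_scale[] are placeholders. */
    #include <stdio.h>
    #include <stddef.h>

    struct scale_entry {
        unsigned int uscale;    /* micro-units per LSB */
        unsigned char bits;     /* register field encoding */
    };

    static const struct scale_entry accel_scale[] = {
        {  598, 0x3 },
        { 1197, 0x5 },
        { 2394, 0x8 },
        { 4788, 0xc },
    };

    /* Set path: find the register encoding for a requested scale. */
    static int scale_to_bits(unsigned int uscale, unsigned char *bits)
    {
        for (size_t i = 0; i < sizeof(accel_scale) / sizeof(accel_scale[0]); i++) {
            if (accel_scale[i].uscale == uscale) {
                *bits = accel_scale[i].bits;
                return 0;
            }
        }
        return -1;   /* unsupported scale */
    }

    /* Get path: translate a register value back into a scale. */
    static int bits_to_scale(unsigned char bits, unsigned int *uscale)
    {
        for (size_t i = 0; i < sizeof(accel_scale) / sizeof(accel_scale[0]); i++) {
            if (accel_scale[i].bits == bits) {
                *uscale = accel_scale[i].uscale;
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned char bits;
        unsigned int uscale;

        if (!scale_to_bits(1197, &bits) && !bits_to_scale(bits, &uscale))
            printf("bits=0x%x uscale=%u\n", bits, uscale);
        return 0;
    }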
|
/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ |
D | ia_css_sdis_common.host.h |
     65  s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_HOR_PROJ_NUM_ISP];   member
     72  s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_VER_PROJ_NUM_ISP];   member
     79  VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
     84  VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
|
/drivers/watchdog/ |
D | wdat_wdt.c |
    317  const struct acpi_table_wdat *tbl;   in wdat_wdt_probe() local
    325  (struct acpi_table_header **)&tbl);   in wdat_wdt_probe()
    339  if (tbl->timer_period < 1)   in wdat_wdt_probe()
    341  if (tbl->min_count > tbl->max_count)   in wdat_wdt_probe()
    344  wdat->period = tbl->timer_period;   in wdat_wdt_probe()
    345  wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;   in wdat_wdt_probe()
    346  wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;   in wdat_wdt_probe()
    347  wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;   in wdat_wdt_probe()
    373  entries = (struct acpi_wdat_entry *)(tbl + 1);   in wdat_wdt_probe()
    374  for (i = 0; i < tbl->entries; i++) {   in wdat_wdt_probe()
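wdat_wdt_probe() above rejects a WDAT table whose timer period is below 1 ms or whose min_count exceeds max_count, then derives the hardware heartbeat window in milliseconds as period * count. A small sketch of those checks is shown below, using a reduced header struct that only mirrors the three fields involved, not the full ACPI WDAT layout.

    /* Sketch of the sanity checks and heartbeat derivation visible in
     * wdat_wdt_probe(); the struct below is a stand-in, not acpi_table_wdat. */
    #include <stdio.h>
    #include <stdint.h>

    struct wdat_header {
        uint32_t timer_period;  /* milliseconds per watchdog count */
        uint32_t min_count;
        uint32_t max_count;
    };

    static int wdat_check(const struct wdat_header *tbl,
                          uint32_t *min_hw_heartbeat_ms,
                          uint32_t *max_hw_heartbeat_ms)
    {
        if (tbl->timer_period < 1)
            return -1;                  /* a 0 ms period is meaningless */
        if (tbl->min_count > tbl->max_count)
            return -1;                  /* inconsistent count range */

        *min_hw_heartbeat_ms = tbl->timer_period * tbl->min_count;
        *max_hw_heartbeat_ms = tbl->timer_period * tbl->max_count;
        return 0;
    }

    int main(void)
    {
        struct wdat_header tbl = { .timer_period = 1000, .min_count = 1, .max_count = 1024 };
        uint32_t min_ms, max_ms;

        if (!wdat_check(&tbl, &min_ms, &max_ms))
            printf("heartbeat window: %u..%u ms\n", min_ms, max_ms);
        return 0;
    }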
|
/drivers/net/wireless/realtek/rtw88/ |
D | phy.c |
   1029  void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)   in rtw_parse_tbl_phy_cond() argument
   1031  const union phy_table_tile *p = tbl->data;   in rtw_parse_tbl_phy_cond()
   1032  const union phy_table_tile *end = p + tbl->size / 2;   in rtw_parse_tbl_phy_cond()
   1067  (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);   in rtw_parse_tbl_phy_cond()
   1420  void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)   in rtw_parse_tbl_bb_pg() argument
   1422  const struct rtw_phy_pg_cfg_pair *p = tbl->data;   in rtw_parse_tbl_bb_pg()
   1423  const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;   in rtw_parse_tbl_bb_pg()
   1568  const struct rtw_table *tbl)   in rtw_parse_tbl_txpwr_lmt() argument
   1570  const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;   in rtw_parse_tbl_txpwr_lmt()
   1571  const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;   in rtw_parse_tbl_txpwr_lmt()
         [all …]
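The rtw_parse_tbl_*() helpers above all follow the same pattern: treat tbl->data as an array of fixed-size entries, compute end = data + size, and walk a pointer from data to end, applying each entry through a configuration callback. A minimal sketch of that walk follows; the cfg_pair/cfg_table types and the write_reg() callback are stand-ins, not rtw88's real definitions.

    /* Sketch of the "walk tbl->data up to data + size" parsing pattern
     * shown above; all types and the callback are illustrative. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct cfg_pair {
        uint32_t addr;
        uint32_t data;
    };

    struct cfg_table {
        const struct cfg_pair *data;
        size_t size;                            /* number of pairs */
        void (*do_cfg)(uint32_t addr, uint32_t data);
    };

    static void write_reg(uint32_t addr, uint32_t data)
    {
        printf("reg 0x%04x <- 0x%08x\n", addr, data);
    }

    static void parse_tbl(const struct cfg_table *tbl)
    {
        const struct cfg_pair *p = tbl->data;
        const struct cfg_pair *end = p + tbl->size;

        for (; p < end; p++)
            tbl->do_cfg(p->addr, p->data);
    }

    int main(void)
    {
        static const struct cfg_pair pairs[] = {
            { 0x0800, 0x80040000 },
            { 0x0804, 0x00000003 },
        };
        struct cfg_table tbl = { pairs, 2, write_reg };

        parse_tbl(&tbl);
        return 0;
    }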
|
D | phy.h |
     33  void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
     34  void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
     35  void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
     36  void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
     38  void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
     40  void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
     42  void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
|
/drivers/infiniband/ulp/opa_vnic/ |
D | opa_vnic_encap.c |
    153  struct opa_veswport_mactable *tbl)   in opa_vnic_query_mac_tbl() argument
    165  loffset = be16_to_cpu(tbl->offset);   in opa_vnic_query_mac_tbl()
    166  lnum_entries = be16_to_cpu(tbl->num_entries);   in opa_vnic_query_mac_tbl()
    177  entry = &tbl->tbl_entries[node->index - loffset];   in opa_vnic_query_mac_tbl()
    184  tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest);   in opa_vnic_query_mac_tbl()
    205  struct opa_veswport_mactable *tbl)   in opa_vnic_update_mac_tbl() argument
    221  loffset = be16_to_cpu(tbl->offset);   in opa_vnic_update_mac_tbl()
    222  lnum_entries = be16_to_cpu(tbl->num_entries);   in opa_vnic_update_mac_tbl()
    228  &tbl->tbl_entries[i];   in opa_vnic_update_mac_tbl()
    285  adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest);   in opa_vnic_update_mac_tbl()
|