
Searched refs:table (Results 1 – 25 of 895) sorted by relevance


/drivers/net/wireless/intel/iwlwifi/fw/
dump.c
120 struct iwl_umac_error_event_table table = {}; in iwl_fwrt_dump_umac_error_log() local
129 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); in iwl_fwrt_dump_umac_error_log()
131 if (table.valid) in iwl_fwrt_dump_umac_error_log()
132 fwrt->dump.umac_err_id = table.error_id; in iwl_fwrt_dump_umac_error_log()
141 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { in iwl_fwrt_dump_umac_error_log()
144 fwrt->trans->status, table.valid); in iwl_fwrt_dump_umac_error_log()
147 if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) == in iwl_fwrt_dump_umac_error_log()
154 IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id, in iwl_fwrt_dump_umac_error_log()
155 iwl_fw_lookup_assert_desc(table.error_id)); in iwl_fwrt_dump_umac_error_log()
156 IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1); in iwl_fwrt_dump_umac_error_log()
[all …]
/drivers/net/wireguard/
peerlookup.c
10 static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, in pubkey_bucket() argument
17 const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); in pubkey_bucket()
19 return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; in pubkey_bucket()
24 struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); in wg_pubkey_hashtable_alloc() local
26 if (!table) in wg_pubkey_hashtable_alloc()
29 get_random_bytes(&table->key, sizeof(table->key)); in wg_pubkey_hashtable_alloc()
30 hash_init(table->hashtable); in wg_pubkey_hashtable_alloc()
31 mutex_init(&table->lock); in wg_pubkey_hashtable_alloc()
32 return table; in wg_pubkey_hashtable_alloc()
35 void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, in wg_pubkey_hashtable_add() argument
[all …]
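The peerlookup.c match above shows a keyed-hash bucket lookup: hash the public key with a per-table secret, then mask the hash down to a power-of-two bucket count. Below is a minimal userspace sketch of the same idea; it substitutes FNV-1a for siphash and invented names (demo_table, demo_bucket) for the kernel helpers, so it is only an illustration of the pattern, not the WireGuard API.

#include <stddef.h>
#include <stdint.h>

#define DEMO_KEY_LEN 32
#define DEMO_BUCKETS 256                /* must stay a power of two for the mask trick */

struct demo_entry {
    uint8_t key[DEMO_KEY_LEN];
    struct demo_entry *next;
};

struct demo_table {
    uint64_t secret;                    /* per-table secret, stands in for the siphash key */
    struct demo_entry *buckets[DEMO_BUCKETS];
};

/* FNV-1a seeded with the table secret; a stand-in for the keyed siphash. */
static uint64_t demo_hash(const struct demo_table *t, const uint8_t *key)
{
    uint64_t h = 1469598103934665603ULL ^ t->secret;
    for (size_t i = 0; i < DEMO_KEY_LEN; i++)
        h = (h ^ key[i]) * 1099511628211ULL;
    return h;
}

static struct demo_entry **demo_bucket(struct demo_table *t, const uint8_t *key)
{
    /* Same shape as pubkey_bucket(): hash, then mask with (bucket count - 1). */
    return &t->buckets[demo_hash(t, key) & (DEMO_BUCKETS - 1)];
}

static void demo_add(struct demo_table *t, struct demo_entry *e)
{
    struct demo_entry **head = demo_bucket(t, e->key);
    e->next = *head;
    *head = e;
}

int main(void)
{
    struct demo_table t = { .secret = 0x1234 };
    struct demo_entry e = { .key = { 1, 2, 3 } };

    demo_add(&t, &e);
    return *demo_bucket(&t, e.key) == &e ? 0 : 1;
}
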
/drivers/gpu/drm/i915/gt/
intel_mocs.c
25 const struct drm_i915_mocs_entry *table; member
454 struct drm_i915_mocs_table *table) in get_mocs_settings() argument
458 memset(table, 0, sizeof(struct drm_i915_mocs_table)); in get_mocs_settings()
460 table->unused_entries_index = I915_MOCS_PTE; in get_mocs_settings()
462 table->size = ARRAY_SIZE(mtl_mocs_table); in get_mocs_settings()
463 table->table = mtl_mocs_table; in get_mocs_settings()
464 table->n_entries = MTL_NUM_MOCS_ENTRIES; in get_mocs_settings()
465 table->uc_index = 9; in get_mocs_settings()
466 table->unused_entries_index = 1; in get_mocs_settings()
468 table->size = ARRAY_SIZE(dg2_mocs_table); in get_mocs_settings()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/
rl.c
112 static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, in find_rl_entry() argument
119 lockdep_assert_held(&table->rl_lock); in find_rl_entry()
120 WARN_ON(!table->rl_entry); in find_rl_entry()
122 for (i = 0; i < table->max_size; i++) { in find_rl_entry()
124 if (!table->rl_entry[i].refcount) in find_rl_entry()
125 return &table->rl_entry[i]; in find_rl_entry()
129 if (table->rl_entry[i].refcount) { in find_rl_entry()
130 if (table->rl_entry[i].dedicated) in find_rl_entry()
132 if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in, in find_rl_entry()
134 return &table->rl_entry[i]; in find_rl_entry()
[all …]
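find_rl_entry() above illustrates a small fixed-size cache: walk the array once, remember the first entry with a zero refcount as a free slot, and return an in-use entry outright if its parameters already match. The fragment below is a simplified, self-contained sketch of that search (plain ints in place of the mlx5 rate-limit structures; entry and find_entry are made-up names), not the mlx5 code itself.

#include <stddef.h>
#include <stdio.h>

struct entry {
    int refcount;
    int rate;               /* the parameter we deduplicate on */
};

/* Return a matching in-use entry if one exists, else the first free slot, else NULL. */
static struct entry *find_entry(struct entry *tbl, size_t n, int rate)
{
    struct entry *free_slot = NULL;

    for (size_t i = 0; i < n; i++) {
        if (!tbl[i].refcount) {
            if (!free_slot)
                free_slot = &tbl[i];
            continue;
        }
        if (tbl[i].rate == rate)
            return &tbl[i];  /* reuse the existing entry, caller bumps refcount */
    }
    return free_slot;        /* may be NULL if the table is full */
}

int main(void)
{
    struct entry tbl[4] = { { .refcount = 1, .rate = 100 } };

    struct entry *e = find_entry(tbl, 4, 100);
    printf("matched rate %d, refcount %d\n", e->rate, e->refcount);
    e = find_entry(tbl, 4, 250);
    printf("got %s slot\n", e && !e->refcount ? "a free" : "no");
    return 0;
}
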
/drivers/net/ethernet/mellanox/mlx5/core/sf/
devlink.c
40 mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) in mlx5_sf_lookup_by_function_id() argument
42 return xa_load(&table->function_ids, fn_id); in mlx5_sf_lookup_by_function_id()
45 static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) in mlx5_sf_function_id_insert() argument
47 return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL); in mlx5_sf_function_id_insert()
50 static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) in mlx5_sf_function_id_erase() argument
52 xa_erase(&table->function_ids, sf->hw_fn_id); in mlx5_sf_function_id_erase()
56 mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, in mlx5_sf_alloc() argument
70 id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum); in mlx5_sf_alloc()
82 hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id); in mlx5_sf_alloc()
83 dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id); in mlx5_sf_alloc()
[all …]
hw_table.c
61 mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id) in mlx5_sf_table_fn_to_hwc() argument
65 for (i = 0; i < ARRAY_SIZE(table->hwc); i++) { in mlx5_sf_table_fn_to_hwc()
66 if (table->hwc[i].max_fn && in mlx5_sf_table_fn_to_hwc()
67 fn_id >= table->hwc[i].start_fn_id && in mlx5_sf_table_fn_to_hwc()
68 fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn)) in mlx5_sf_table_fn_to_hwc()
69 return &table->hwc[i]; in mlx5_sf_table_fn_to_hwc()
74 static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller, in mlx5_sf_hw_table_id_alloc() argument
81 hwc = mlx5_sf_controller_to_hwc(table->dev, controller); in mlx5_sf_hw_table_id_alloc()
103 static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id) in mlx5_sf_hw_table_id_free() argument
107 hwc = mlx5_sf_controller_to_hwc(table->dev, controller); in mlx5_sf_hw_table_id_free()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/
dev.c
29 struct mlx5_sf_dev_table *table; member
40 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_allocated() local
42 return table && !xa_empty(&table->devices); in mlx5_sf_dev_allocated()
91 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_add() local
119 sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); in mlx5_sf_dev_add()
136 err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL); in mlx5_sf_dev_add()
150 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_del() local
152 xa_erase(&table->devices, sf_index); in mlx5_sf_dev_del()
159 struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); in mlx5_sf_dev_state_change_handler() local
166 max_functions = mlx5_sf_max_functions(table->dev); in mlx5_sf_dev_state_change_handler()
[all …]
/drivers/net/ethernet/mellanox/mlx4/
icm.c
258 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) in mlx4_table_get() argument
260 u32 i = (obj & (table->num_obj - 1)) / in mlx4_table_get()
261 (MLX4_TABLE_CHUNK_SIZE / table->obj_size); in mlx4_table_get()
264 mutex_lock(&table->mutex); in mlx4_table_get()
266 if (table->icm[i]) { in mlx4_table_get()
267 ++table->icm[i]->refcount; in mlx4_table_get()
271 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, in mlx4_table_get()
272 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | in mlx4_table_get()
273 __GFP_NOWARN, table->coherent); in mlx4_table_get()
274 if (!table->icm[i]) { in mlx4_table_get()
[all …]
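mlx4_table_get() above follows a common lazily-populated, refcounted chunk table: map the object index to a chunk slot, then, under the table mutex, either bump the existing chunk's refcount or allocate the chunk on first use. A rough userspace analogue is sketched below (pthread mutex and calloc in place of the mlx4 ICM allocator; chunk_table and chunk_get are invented names, not the mlx4 API).

#include <pthread.h>
#include <stdlib.h>

#define OBJS_PER_CHUNK 1024

struct chunk {
    int refcount;
    void *mem;
};

struct chunk_table {
    pthread_mutex_t mutex;
    size_t nchunks;
    struct chunk *chunks;
};

/* Take a reference on the chunk backing object 'obj', allocating it on first use. */
static int chunk_get(struct chunk_table *t, unsigned int obj)
{
    size_t i = obj / OBJS_PER_CHUNK;
    int ret = 0;

    if (i >= t->nchunks)
        return -1;

    pthread_mutex_lock(&t->mutex);
    if (t->chunks[i].mem) {
        ++t->chunks[i].refcount;        /* already mapped: just take a reference */
        goto out;
    }
    t->chunks[i].mem = calloc(OBJS_PER_CHUNK, sizeof(long));
    if (!t->chunks[i].mem) {
        ret = -1;
        goto out;
    }
    t->chunks[i].refcount = 1;
out:
    pthread_mutex_unlock(&t->mutex);
    return ret;
}

int main(void)
{
    struct chunk_table t = {
        .mutex   = PTHREAD_MUTEX_INITIALIZER,
        .nchunks = 16,
        .chunks  = calloc(16, sizeof(struct chunk)),
    };

    if (!t.chunks)
        return 1;
    return chunk_get(&t, 2048);         /* lands in chunk 2 */
}
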
/drivers/infiniband/hw/hns/
hns_roce_hem.c
203 struct hns_roce_hem_table *table, unsigned long *obj, in hns_roce_calc_hem_mhop() argument
212 if (get_hem_table_config(hr_dev, mhop, table->type)) in hns_roce_calc_hem_mhop()
222 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); in hns_roce_calc_hem_mhop()
224 chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : in hns_roce_calc_hem_mhop()
226 table_idx = *obj / (chunk_size / table->obj_size); in hns_roce_calc_hem_mhop()
242 table->type, mhop->hop_num); in hns_roce_calc_hem_mhop()
295 struct hns_roce_hem_table *table, unsigned long obj, in calc_hem_config() argument
306 ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop); in calc_hem_config()
314 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); in calc_hem_config()
331 table->type, mhop->hop_num); in calc_hem_config()
[all …]
/drivers/md/dm-vdo/
priority-table.c
56 struct priority_table *table; in vdo_make_priority_table() local
64 struct bucket, __func__, &table); in vdo_make_priority_table()
69 struct bucket *bucket = &table->buckets[priority]; in vdo_make_priority_table()
75 table->max_priority = max_priority; in vdo_make_priority_table()
76 table->search_vector = 0; in vdo_make_priority_table()
78 *table_ptr = table; in vdo_make_priority_table()
88 void vdo_free_priority_table(struct priority_table *table) in vdo_free_priority_table() argument
90 if (table == NULL) in vdo_free_priority_table()
97 vdo_reset_priority_table(table); in vdo_free_priority_table()
99 vdo_free(table); in vdo_free_priority_table()
[all …]
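priority-table.c above keeps one bucket per priority plus a search_vector with one bit set per non-empty bucket, so finding the highest occupied priority is a single find-last-set on the vector. Below is a compact sketch of that bookkeeping under the assumption of a 64-priority limit, using GCC/Clang builtins instead of the VDO helpers; all names here are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIORITY 64

struct prio_table {
    uint64_t search_vector;             /* bit p set => bucket p is non-empty */
    int counts[MAX_PRIORITY];           /* stand-in for the per-priority queues */
};

static void prio_enqueue(struct prio_table *t, unsigned int prio)
{
    t->counts[prio]++;
    t->search_vector |= UINT64_C(1) << prio;
}

/* Return the highest non-empty priority, or -1 if the table is empty. */
static int prio_dequeue(struct prio_table *t)
{
    if (!t->search_vector)
        return -1;

    int prio = 63 - __builtin_clzll(t->search_vector);    /* find-last-set */

    if (--t->counts[prio] == 0)
        t->search_vector &= ~(UINT64_C(1) << prio);       /* bucket drained */
    return prio;
}

int main(void)
{
    struct prio_table t = { 0 };

    prio_enqueue(&t, 3);
    prio_enqueue(&t, 17);
    printf("%d\n", prio_dequeue(&t));   /* 17 */
    printf("%d\n", prio_dequeue(&t));   /* 3  */
    printf("%d\n", prio_dequeue(&t));   /* -1, table now empty */
    return 0;
}
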
/drivers/media/i2c/
ks0127.c
200 u8 *table = reg_defaults; in init_reg_defaults() local
206 table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */ in init_reg_defaults()
207 table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */ in init_reg_defaults()
208 table[KS_CMDC] = 0x00; /* Test options */ in init_reg_defaults()
210 table[KS_CMDD] = 0x01; in init_reg_defaults()
211 table[KS_HAVB] = 0x00; /* HAV Start Control */ in init_reg_defaults()
212 table[KS_HAVE] = 0x00; /* HAV End Control */ in init_reg_defaults()
213 table[KS_HS1B] = 0x10; /* HS1 Start Control */ in init_reg_defaults()
214 table[KS_HS1E] = 0x00; /* HS1 End Control */ in init_reg_defaults()
215 table[KS_HS2B] = 0x00; /* HS2 Start Control */ in init_reg_defaults()
[all …]
/drivers/net/ethernet/marvell/octeontx2/af/
rvu_npc_hash.c
429 struct npc_exact_table *table = rvu->hw->table; in rvu_exact_calculate_hash() local
448 hash &= table->mem_table.hash_mask; in rvu_exact_calculate_hash()
449 hash += table->mem_table.hash_offset; in rvu_exact_calculate_hash()
468 struct npc_exact_table *table; in rvu_npc_exact_alloc_mem_table_entry() local
471 table = rvu->hw->table; in rvu_npc_exact_alloc_mem_table_entry()
472 depth = table->mem_table.depth; in rvu_npc_exact_alloc_mem_table_entry()
475 mutex_lock(&table->lock); in rvu_npc_exact_alloc_mem_table_entry()
476 for (i = 0; i < table->mem_table.ways; i++) { in rvu_npc_exact_alloc_mem_table_entry()
477 if (test_bit(hash + i * depth, table->mem_table.bmap)) in rvu_npc_exact_alloc_mem_table_entry()
480 set_bit(hash + i * depth, table->mem_table.bmap); in rvu_npc_exact_alloc_mem_table_entry()
[all …]
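rvu_npc_exact_alloc_mem_table_entry() above allocates a slot in an N-way hashed table: the masked hash picks a row, each way lives at hash + way * depth, and a bitmap records which slots are taken (the kernel code does this under table->lock, omitted here). A self-contained approximation with a plain C bit array and a toy hash is shown below; every name in it is hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEPTH 128                       /* rows per way, power of two */
#define WAYS  4

static uint64_t bmap[(DEPTH * WAYS + 63) / 64];

static bool test_and_set(unsigned int idx)
{
    uint64_t mask = UINT64_C(1) << (idx % 64);
    bool was_set = bmap[idx / 64] & mask;

    bmap[idx / 64] |= mask;
    return was_set;
}

/* Pick a slot for 'key': row from the hash, then the first free way; -1 if all ways collide. */
static int alloc_slot(uint32_t key)
{
    unsigned int row = (key * 2654435761u) & (DEPTH - 1);   /* toy hash, then mask */

    for (unsigned int way = 0; way < WAYS; way++) {
        unsigned int idx = row + way * DEPTH;               /* same layout as hash + i * depth */
        if (!test_and_set(idx))
            return (int)idx;
    }
    return -1;
}

int main(void)
{
    printf("%d %d\n", alloc_slot(42), alloc_slot(42));      /* same row, different ways */
    return 0;
}
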
/drivers/infiniband/core/
cache.c
49 u16 table[] __counted_by(table_len);
170 static bool is_gid_index_default(const struct ib_gid_table *table, in is_gid_index_default() argument
173 return index < 32 && (BIT(index) & table->default_gid_indices); in is_gid_index_default()
241 struct ib_gid_table *table = rdma_gid_table(device, port_num); in free_gid_entry_locked() local
246 write_lock_irq(&table->rwlock); in free_gid_entry_locked()
254 if (entry == table->data_vec[entry->attr.index]) in free_gid_entry_locked()
255 table->data_vec[entry->attr.index] = NULL; in free_gid_entry_locked()
257 write_unlock_irq(&table->rwlock); in free_gid_entry_locked()
286 struct ib_gid_table *table = rdma_gid_table(device, port_num); in free_gid_work() local
288 mutex_lock(&table->lock); in free_gid_work()
[all …]
/drivers/clk/
clk-divider.c
45 static unsigned int _get_table_maxdiv(const struct clk_div_table *table, in _get_table_maxdiv() argument
51 for (clkt = table; clkt->div; clkt++) in _get_table_maxdiv()
57 static unsigned int _get_table_mindiv(const struct clk_div_table *table) in _get_table_mindiv() argument
62 for (clkt = table; clkt->div; clkt++) in _get_table_mindiv()
68 static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width, in _get_maxdiv() argument
75 if (table) in _get_maxdiv()
76 return _get_table_maxdiv(table, width); in _get_maxdiv()
80 static unsigned int _get_table_div(const struct clk_div_table *table, in _get_table_div() argument
85 for (clkt = table; clkt->div; clkt++) in _get_table_div()
91 static unsigned int _get_div(const struct clk_div_table *table, in _get_div() argument
[all …]
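The clk-divider helpers above all walk a divider table terminated by an entry with div == 0, picking out the maximum divider or the divider for a given register value. A minimal standalone version of that scan is given below; the local div_table struct only mirrors the shape of struct clk_div_table, and none of this is the clk framework API itself.

#include <stdio.h>

/* Mirrors the shape of struct clk_div_table: a {val, div} pair, div == 0 terminates. */
struct div_table {
    unsigned int val;
    unsigned int div;
};

static unsigned int table_maxdiv(const struct div_table *table)
{
    unsigned int maxdiv = 0;

    for (const struct div_table *clkt = table; clkt->div; clkt++)
        if (clkt->div > maxdiv)
            maxdiv = clkt->div;
    return maxdiv;
}

static unsigned int table_div(const struct div_table *table, unsigned int val)
{
    for (const struct div_table *clkt = table; clkt->div; clkt++)
        if (clkt->val == val)
            return clkt->div;
    return 0;                           /* no entry for this register value */
}

int main(void)
{
    static const struct div_table tbl[] = {
        { .val = 0, .div = 1 },
        { .val = 1, .div = 2 },
        { .val = 2, .div = 8 },
        { 0, 0 },                       /* sentinel: div == 0 terminates the table */
    };

    printf("maxdiv=%u div(2)=%u\n", table_maxdiv(tbl), table_div(tbl, 2));
    return 0;
}
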
/drivers/gpu/drm/amd/pm/powerplay/smumgr/
vegam_smumgr.c
449 SMU75_Discrete_DpmTable *table) in vegam_populate_smc_mvdd_table() argument
459 table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US( in vegam_populate_smc_mvdd_table()
462 table->SmioTable2.Pattern[level].Smio = in vegam_populate_smc_mvdd_table()
464 table->Smio[level] |= in vegam_populate_smc_mvdd_table()
467 table->SmioMask2 = data->mvdd_voltage_table.mask_low; in vegam_populate_smc_mvdd_table()
469 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); in vegam_populate_smc_mvdd_table()
476 struct SMU75_Discrete_DpmTable *table) in vegam_populate_smc_vddci_table() argument
487 table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US( in vegam_populate_smc_vddci_table()
489 table->SmioTable1.Pattern[level].Smio = (uint8_t) level; in vegam_populate_smc_vddci_table()
491 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; in vegam_populate_smc_vddci_table()
[all …]
polaris10_smumgr.c
432 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); in polaris10_populate_bapm_parameters_in_dpm_table() local
442 table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); in polaris10_populate_bapm_parameters_in_dpm_table()
443 table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); in polaris10_populate_bapm_parameters_in_dpm_table()
449 table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( in polaris10_populate_bapm_parameters_in_dpm_table()
451 table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( in polaris10_populate_bapm_parameters_in_dpm_table()
453 table->FanGainEdge = PP_HOST_TO_SMC_US( in polaris10_populate_bapm_parameters_in_dpm_table()
455 table->FanGainHotspot = PP_HOST_TO_SMC_US( in polaris10_populate_bapm_parameters_in_dpm_table()
464 table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); in polaris10_populate_bapm_parameters_in_dpm_table()
465 table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); in polaris10_populate_bapm_parameters_in_dpm_table()
478 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); in polaris10_populate_zero_rpm_parameters() local
[all …]
ci_smumgr.c
842 SMU7_Discrete_DpmTable *table) in ci_populate_smc_vddc_table() argument
848 table->VddcLevelCount = data->vddc_voltage_table.count; in ci_populate_smc_vddc_table()
849 for (count = 0; count < table->VddcLevelCount; count++) { in ci_populate_smc_vddc_table()
852 &(table->VddcLevel[count])); in ci_populate_smc_vddc_table()
857 table->VddcLevel[count].Smio = (uint8_t) count; in ci_populate_smc_vddc_table()
858 table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low; in ci_populate_smc_vddc_table()
859 table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low; in ci_populate_smc_vddc_table()
861 table->VddcLevel[count].Smio = 0; in ci_populate_smc_vddc_table()
865 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); in ci_populate_smc_vddc_table()
871 SMU7_Discrete_DpmTable *table) in ci_populate_smc_vdd_ci_table() argument
[all …]
tonga_smumgr.c
303 SMU72_Discrete_DpmTable *table) in tonga_populate_smc_vddc_table() argument
309 table->VddcLevelCount = data->vddc_voltage_table.count; in tonga_populate_smc_vddc_table()
310 for (count = 0; count < table->VddcLevelCount; count++) { in tonga_populate_smc_vddc_table()
311 table->VddcTable[count] = in tonga_populate_smc_vddc_table()
314 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); in tonga_populate_smc_vddc_table()
320 SMU72_Discrete_DpmTable *table) in tonga_populate_smc_vdd_gfx_table() argument
326 table->VddGfxLevelCount = data->vddgfx_voltage_table.count; in tonga_populate_smc_vdd_gfx_table()
328 table->VddGfxTable[count] = in tonga_populate_smc_vdd_gfx_table()
331 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); in tonga_populate_smc_vdd_gfx_table()
337 SMU72_Discrete_DpmTable *table) in tonga_populate_smc_vdd_ci_table() argument
[all …]
iceland_smumgr.c
618 SMU71_Discrete_DpmTable *table) in iceland_populate_smc_vddc_table() argument
624 table->VddcLevelCount = data->vddc_voltage_table.count; in iceland_populate_smc_vddc_table()
625 for (count = 0; count < table->VddcLevelCount; count++) { in iceland_populate_smc_vddc_table()
628 &(table->VddcLevel[count])); in iceland_populate_smc_vddc_table()
633 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; in iceland_populate_smc_vddc_table()
635 table->VddcLevel[count].Smio = 0; in iceland_populate_smc_vddc_table()
638 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); in iceland_populate_smc_vddc_table()
644 SMU71_Discrete_DpmTable *table) in iceland_populate_smc_vdd_ci_table() argument
650 table->VddciLevelCount = data->vddci_voltage_table.count; in iceland_populate_smc_vdd_ci_table()
652 for (count = 0; count < table->VddciLevelCount; count++) { in iceland_populate_smc_vdd_ci_table()
[all …]
fiji_smumgr.c
755 struct SMU73_Discrete_DpmTable *table) in fiji_populate_cac_table() argument
773 table->BapmVddcVidLoSidd[count] = in fiji_populate_cac_table()
775 table->BapmVddcVidHiSidd[count] = in fiji_populate_cac_table()
783 struct SMU73_Discrete_DpmTable *table) in fiji_populate_smc_voltage_tables() argument
787 result = fiji_populate_cac_table(hwmgr, table); in fiji_populate_smc_voltage_tables()
821 struct SMU73_Discrete_DpmTable *table) in fiji_populate_ulv_state() argument
823 return fiji_populate_ulv_level(hwmgr, &table->Ulv); in fiji_populate_ulv_state()
827 struct SMU73_Discrete_DpmTable *table) in fiji_populate_smc_link_level() argument
837 table->LinkLevel[i].PcieGenSpeed = in fiji_populate_smc_link_level()
839 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( in fiji_populate_smc_link_level()
[all …]
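The smumgr results above (vegam, polaris10, ci, tonga, iceland, fiji) all populate a discrete DPM table for the SMC firmware, and each field passes through a PP_HOST_TO_SMC_* or CONVERT_FROM_HOST_TO_SMC_UL conversion on the way in. The sketch below shows only that convert-while-populating step, using portable store-big-endian helpers and an invented fw_table layout; it is not the powerplay table format.

#include <stdint.h>
#include <stdio.h>

/* Portable "store big-endian" helpers, standing in for the host-to-SMC conversions. */
static void store_be16(uint8_t out[2], uint16_t v)
{
    out[0] = (uint8_t)(v >> 8);
    out[1] = (uint8_t)v;
}

static void store_be32(uint8_t out[4], uint32_t v)
{
    out[0] = (uint8_t)(v >> 24);
    out[1] = (uint8_t)(v >> 16);
    out[2] = (uint8_t)(v >> 8);
    out[3] = (uint8_t)v;
}

/* Hypothetical firmware-visible table: every multi-byte field is big-endian. */
struct fw_table {
    uint8_t voltage_be[4][2];
    uint8_t level_count_be[4];
};

static void populate_fw_table(struct fw_table *t, const uint16_t *mv, uint32_t count)
{
    for (uint32_t i = 0; i < count && i < 4; i++)
        store_be16(t->voltage_be[i], mv[i]);    /* convert while copying, as the smumgr code does */
    store_be32(t->level_count_be, count);
}

int main(void)
{
    const uint16_t mv[3] = { 800, 900, 1000 };
    struct fw_table t = { 0 };

    populate_fw_table(&t, mv, 3);
    printf("voltage[0] bytes: %02x %02x\n", t.voltage_be[0][0], t.voltage_be[0][1]);
    return 0;
}
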
/drivers/net/ethernet/sfc/
mcdi_filters.c
27 efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table, in efx_mcdi_filter_entry_spec() argument
30 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & in efx_mcdi_filter_entry_spec()
35 efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table, in efx_mcdi_filter_entry_flags() argument
38 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; in efx_mcdi_filter_entry_flags()
84 efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table, in efx_mcdi_filter_set_entry() argument
89 table->entry[filter_idx].spec = (unsigned long)spec | flags; in efx_mcdi_filter_set_entry()
328 static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table, in efx_mcdi_filter_pri() argument
335 match_pri < table->rx_match_count; in efx_mcdi_filter_pri()
337 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) in efx_mcdi_filter_pri()
349 struct efx_mcdi_filter_table *table; in efx_mcdi_filter_insert_locked() local
[all …]
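mcdi_filters.c above packs flag bits into the low bits of the filter-spec pointer: entry_spec() masks them off, entry_flags() masks them in, and set_entry() ORs pointer and flags together, relying on the allocation being aligned so those bits are otherwise zero. The sketch below reproduces that pointer-tagging trick generically; FLAG_MASK, tag_ptr and friends are made-up names, and the alignment assumption is checked explicitly.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BUSY  0x1UL
#define FLAG_STALE 0x2UL
#define FLAG_MASK  0x3UL        /* low two bits; requires pointers aligned to >= 4 bytes */

struct spec { int id; };

/* Pack pointer and flags into one word, as the filter table does with entry.spec. */
static uintptr_t tag_ptr(const struct spec *p, unsigned long flags)
{
    assert(((uintptr_t)p & FLAG_MASK) == 0);    /* alignment keeps the low bits free */
    return (uintptr_t)p | (flags & FLAG_MASK);
}

static struct spec *tagged_spec(uintptr_t word)
{
    return (struct spec *)(word & ~FLAG_MASK);
}

static unsigned long tagged_flags(uintptr_t word)
{
    return word & FLAG_MASK;
}

int main(void)
{
    struct spec *s = malloc(sizeof(*s));

    if (!s)
        return 1;
    s->id = 7;
    uintptr_t word = tag_ptr(s, FLAG_BUSY);
    printf("id=%d flags=%lx\n", tagged_spec(word)->id, tagged_flags(word));
    free(s);
    return 0;
}
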
/drivers/infiniband/hw/mthca/
mthca_memfree.c
222 int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) in mthca_table_get() argument
224 int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; in mthca_table_get()
227 mutex_lock(&table->mutex); in mthca_table_get()
229 if (table->icm[i]) { in mthca_table_get()
230 ++table->icm[i]->refcount; in mthca_table_get()
234 table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, in mthca_table_get()
235 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | in mthca_table_get()
236 __GFP_NOWARN, table->coherent); in mthca_table_get()
237 if (!table->icm[i]) { in mthca_table_get()
242 if (mthca_MAP_ICM(dev, table->icm[i], in mthca_table_get()
[all …]
/drivers/gpu/drm/radeon/
rv730_dpm.c
227 RV770_SMC_STATETABLE *table) in rv730_populate_smc_acpi_state() argument
239 table->ACPIState = table->initialState; in rv730_populate_smc_acpi_state()
240 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; in rv730_populate_smc_acpi_state()
244 &table->ACPIState.levels[0].vddc); in rv730_populate_smc_acpi_state()
245 table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ? in rv730_populate_smc_acpi_state()
247 table->ACPIState.levels[0].gen2XSP = in rv730_populate_smc_acpi_state()
251 &table->ACPIState.levels[0].vddc); in rv730_populate_smc_acpi_state()
252 table->ACPIState.levels[0].gen2PCIE = 0; in rv730_populate_smc_acpi_state()
294 table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); in rv730_populate_smc_acpi_state()
295 table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2); in rv730_populate_smc_acpi_state()
[all …]
/drivers/acpi/
tables.c
331 struct acpi_table_header *table = NULL; in acpi_table_parse() local
340 acpi_get_table(id, acpi_apic_instance, &table); in acpi_table_parse()
342 acpi_get_table(id, 0, &table); in acpi_table_parse()
344 if (table) { in acpi_table_parse()
345 handler(table); in acpi_table_parse()
346 acpi_put_table(table); in acpi_table_parse()
359 struct acpi_table_header *table = NULL; in check_multiple_madt() local
361 acpi_get_table(ACPI_SIG_MADT, 2, &table); in check_multiple_madt()
362 if (table) { in check_multiple_madt()
368 acpi_put_table(table); in check_multiple_madt()
[all …]
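acpi_table_parse() above has the familiar acquire/callback/release shape: look the table up, hand it to the caller's handler if it was found, then drop the reference with acpi_put_table(). A generic, non-ACPI sketch of that shape follows; lookup_resource and put_resource are invented stand-ins, not the ACPICA API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct resource { const char *name; int data; };

static struct resource registry[] = {
    { "MADT", 42 },
    { "FADT", 7 },
};

/* Invented stand-ins for the lookup/release pair. */
static struct resource *lookup_resource(const char *name)
{
    for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
        if (strcmp(registry[i].name, name) == 0)
            return &registry[i];
    return NULL;
}

static void put_resource(struct resource *r)
{
    (void)r;                    /* the real API drops a mapping/reference here */
}

/* Same shape as acpi_table_parse(): look up, call the handler if found, release. */
static int parse_resource(const char *name, void (*handler)(struct resource *))
{
    struct resource *r = lookup_resource(name);

    if (!r)
        return -1;
    handler(r);
    put_resource(r);
    return 0;
}

static void print_handler(struct resource *r)
{
    printf("%s -> %d\n", r->name, r->data);
}

int main(void)
{
    return parse_resource("MADT", print_handler);
}
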
/drivers/net/ethernet/sfc/siena/
farch.c
1884 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; member
1889 struct efx_farch_filter_table *table,
1938 struct efx_farch_filter_table *table; in efx_farch_filter_push_rx_config() local
1943 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; in efx_farch_filter_push_rx_config()
1945 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + in efx_farch_filter_push_rx_config()
1948 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + in efx_farch_filter_push_rx_config()
1951 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + in efx_farch_filter_push_rx_config()
1954 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + in efx_farch_filter_push_rx_config()
1957 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; in efx_farch_filter_push_rx_config()
1958 if (table->size) { in efx_farch_filter_push_rx_config()
[all …]
