/drivers/net/ethernet/mellanox/mlx5/core/sf/ |
D | devlink.c |
     33  mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)    in mlx5_sf_lookup_by_index()  argument
     35  return xa_load(&table->port_indices, port_index);    in mlx5_sf_lookup_by_index()
     39  mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)    in mlx5_sf_lookup_by_function_id()  argument
     44  xa_for_each(&table->port_indices, index, sf) {    in mlx5_sf_lookup_by_function_id()
     51  static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)    in mlx5_sf_id_insert()  argument
     53  return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);    in mlx5_sf_id_insert()
     56  static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)    in mlx5_sf_id_erase()  argument
     58  xa_erase(&table->port_indices, sf->port_index);    in mlx5_sf_id_erase()
     62  mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,    in mlx5_sf_alloc()  argument
     76  id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);    in mlx5_sf_alloc()
     [all …]
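Note: the devlink.c hits above all follow one pattern — SF state kept in an xarray keyed by port index, inserted with xa_insert(), looked up with xa_load() and walked with xa_for_each(). The sketch below is a minimal, hedged illustration of that xarray usage; struct sf_entry, its fields and the xarray name are illustrative stand-ins, not the mlx5 definitions.

    #include <linux/xarray.h>

    struct sf_entry {
            unsigned int port_index;
            unsigned int fn_id;
    };

    static DEFINE_XARRAY(port_indices);     /* port_index -> struct sf_entry * */

    static int sf_entry_insert(struct sf_entry *sf)
    {
            /* xa_insert() fails with -EBUSY if the index is already occupied */
            return xa_insert(&port_indices, sf->port_index, sf, GFP_KERNEL);
    }

    static struct sf_entry *sf_entry_lookup(unsigned int port_index)
    {
            return xa_load(&port_indices, port_index);
    }

    static struct sf_entry *sf_entry_by_fn_id(unsigned int fn_id)
    {
            struct sf_entry *sf;
            unsigned long index;

            /* no secondary index by function id, so walk the whole xarray */
            xa_for_each(&port_indices, index, sf)
                    if (sf->fn_id == fn_id)
                            return sf;
            return NULL;
    }

    static void sf_entry_erase(struct sf_entry *sf)
    {
            xa_erase(&port_indices, sf->port_index);
    }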
|
D | hw_table.c |
     59  mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)    in mlx5_sf_table_fn_to_hwc()  argument
     63  for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {    in mlx5_sf_table_fn_to_hwc()
     64  if (table->hwc[i].max_fn &&    in mlx5_sf_table_fn_to_hwc()
     65  fn_id >= table->hwc[i].start_fn_id &&    in mlx5_sf_table_fn_to_hwc()
     66  fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))    in mlx5_sf_table_fn_to_hwc()
     67  return &table->hwc[i];    in mlx5_sf_table_fn_to_hwc()
     72  static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,    in mlx5_sf_hw_table_id_alloc()  argument
     79  hwc = mlx5_sf_controller_to_hwc(table->dev, controller);    in mlx5_sf_hw_table_id_alloc()
    101  static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)    in mlx5_sf_hw_table_id_free()  argument
    105  hwc = mlx5_sf_controller_to_hwc(table->dev, controller);    in mlx5_sf_hw_table_id_free()
     [all …]
|
/drivers/net/wireguard/ |
D | peerlookup.c |
     10  static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,    in pubkey_bucket()  argument
     17  const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);    in pubkey_bucket()
     19  return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];    in pubkey_bucket()
     24  struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);    in wg_pubkey_hashtable_alloc()  local
     26  if (!table)    in wg_pubkey_hashtable_alloc()
     29  get_random_bytes(&table->key, sizeof(table->key));    in wg_pubkey_hashtable_alloc()
     30  hash_init(table->hashtable);    in wg_pubkey_hashtable_alloc()
     31  mutex_init(&table->lock);    in wg_pubkey_hashtable_alloc()
     32  return table;    in wg_pubkey_hashtable_alloc()
     35  void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,    in wg_pubkey_hashtable_add()  argument
     [all …]
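Note: the peerlookup.c hits show WireGuard's keyed-hash bucket selection — a random siphash key is drawn when the table is allocated, and each public key is hashed with that key into a fixed-size hlist table. A rough sketch of the same pattern follows; the type name, key length and table size here are chosen for illustration, not copied from the driver.

    #include <linux/hashtable.h>
    #include <linux/siphash.h>
    #include <linux/random.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    #define PUBKEY_LEN 32

    struct pk_table {
            DECLARE_HASHTABLE(buckets, 11);         /* 2048 buckets */
            siphash_key_t key;                      /* random per-table key */
            struct mutex lock;
    };

    static struct pk_table *pk_table_alloc(void)
    {
            struct pk_table *table = kvmalloc(sizeof(*table), GFP_KERNEL);

            if (!table)
                    return NULL;
            get_random_bytes(&table->key, sizeof(table->key));
            hash_init(table->buckets);
            mutex_init(&table->lock);
            return table;
    }

    static struct hlist_head *pk_bucket(struct pk_table *table,
                                        const u8 pubkey[PUBKEY_LEN])
    {
            /* keyed hash: bucket placement is unpredictable without the
             * random per-table key, even for attacker-chosen public keys */
            const u64 hash = siphash(pubkey, PUBKEY_LEN, &table->key);

            return &table->buckets[hash & (HASH_SIZE(table->buckets) - 1)];
    }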
|
/drivers/net/wireless/intel/iwlwifi/fw/ |
D | dump.c |
    148  struct iwl_umac_error_event_table table = {};    in iwl_fwrt_dump_umac_error_log()  local
    156  iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));    in iwl_fwrt_dump_umac_error_log()
    158  if (table.valid)    in iwl_fwrt_dump_umac_error_log()
    159  fwrt->dump.umac_err_id = table.error_id;    in iwl_fwrt_dump_umac_error_log()
    161  if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {    in iwl_fwrt_dump_umac_error_log()
    164  fwrt->trans->status, table.valid);    in iwl_fwrt_dump_umac_error_log()
    167  IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,    in iwl_fwrt_dump_umac_error_log()
    168  iwl_fw_lookup_assert_desc(table.error_id));    in iwl_fwrt_dump_umac_error_log()
    169  IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);    in iwl_fwrt_dump_umac_error_log()
    170  IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);    in iwl_fwrt_dump_umac_error_log()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | rl.c |
    113  static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,    in find_rl_entry()  argument
    120  lockdep_assert_held(&table->rl_lock);    in find_rl_entry()
    121  WARN_ON(!table->rl_entry);    in find_rl_entry()
    123  for (i = 0; i < table->max_size; i++) {    in find_rl_entry()
    125  if (!table->rl_entry[i].refcount)    in find_rl_entry()
    126  return &table->rl_entry[i];    in find_rl_entry()
    130  if (table->rl_entry[i].refcount) {    in find_rl_entry()
    131  if (table->rl_entry[i].dedicated)    in find_rl_entry()
    133  if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,    in find_rl_entry()
    135  return &table->rl_entry[i];    in find_rl_entry()
    [all …]
|
/drivers/infiniband/hw/hns/ |
D | hns_roce_hem.c |
    204  struct hns_roce_hem_table *table, unsigned long *obj,    in hns_roce_calc_hem_mhop()  argument
    213  if (get_hem_table_config(hr_dev, mhop, table->type))    in hns_roce_calc_hem_mhop()
    223  bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);    in hns_roce_calc_hem_mhop()
    225  chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :    in hns_roce_calc_hem_mhop()
    227  table_idx = *obj / (chunk_size / table->obj_size);    in hns_roce_calc_hem_mhop()
    243  table->type, mhop->hop_num);    in hns_roce_calc_hem_mhop()
    337  struct hns_roce_hem_table *table, unsigned long obj,    in calc_hem_config()  argument
    348  ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);    in calc_hem_config()
    356  bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);    in calc_hem_config()
    373  table->type, mhop->hop_num);    in calc_hem_config()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c |
    258  int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)    in mlx4_table_get()  argument
    260  u32 i = (obj & (table->num_obj - 1)) /    in mlx4_table_get()
    261  (MLX4_TABLE_CHUNK_SIZE / table->obj_size);    in mlx4_table_get()
    264  mutex_lock(&table->mutex);    in mlx4_table_get()
    266  if (table->icm[i]) {    in mlx4_table_get()
    267  ++table->icm[i]->refcount;    in mlx4_table_get()
    271  table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,    in mlx4_table_get()
    272  (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |    in mlx4_table_get()
    273  __GFP_NOWARN, table->coherent);    in mlx4_table_get()
    274  if (!table->icm[i]) {    in mlx4_table_get()
    [all …]
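Note: mlx4_table_get() above and mthca_table_get() further down follow the same scheme — the object number selects a chunk slot, and the chunk is allocated lazily under the table mutex while later callers only bump a per-chunk refcount. The sketch below is a generic, hedged illustration of that scheme only; the chunk type, sizes and alloc_chunk() helper are hypothetical, not the mlx4/mthca API.

    #include <linux/mutex.h>
    #include <linux/slab.h>

    #define CHUNK_SIZE      (1 << 18)       /* illustrative chunk size in bytes */
    #define MAX_CHUNKS      256             /* illustrative table capacity */

    struct icm_chunk {
            int refcount;
            /* backing pages omitted in this sketch */
    };

    struct icm_table {
            struct mutex mutex;
            u32 num_obj;                    /* number of objects, power of two */
            u32 obj_size;                   /* bytes per object */
            struct icm_chunk *chunk[MAX_CHUNKS];
    };

    static struct icm_chunk *alloc_chunk(void)      /* hypothetical allocator */
    {
            return kzalloc(sizeof(struct icm_chunk), GFP_KERNEL);
    }

    static int icm_table_get(struct icm_table *t, u32 obj)
    {
            /* map the object number to its chunk slot; the caller is assumed
             * to guarantee the result stays below MAX_CHUNKS in this sketch */
            u32 i = (obj & (t->num_obj - 1)) / (CHUNK_SIZE / t->obj_size);
            int ret = 0;

            mutex_lock(&t->mutex);
            if (t->chunk[i]) {
                    ++t->chunk[i]->refcount;        /* chunk already present */
                    goto out;
            }
            t->chunk[i] = alloc_chunk();            /* first user allocates it */
            if (!t->chunk[i]) {
                    ret = -ENOMEM;
                    goto out;
            }
            t->chunk[i]->refcount = 1;
    out:
            mutex_unlock(&t->mutex);
            return ret;
    }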
|
/drivers/infiniband/core/ |
D | cache.c |
     49  u16 table[];    member
    170  static bool is_gid_index_default(const struct ib_gid_table *table,    in is_gid_index_default()  argument
    173  return index < 32 && (BIT(index) & table->default_gid_indices);    in is_gid_index_default()
    241  struct ib_gid_table *table = rdma_gid_table(device, port_num);    in free_gid_entry_locked()  local
    246  write_lock_irq(&table->rwlock);    in free_gid_entry_locked()
    254  if (entry == table->data_vec[entry->attr.index])    in free_gid_entry_locked()
    255  table->data_vec[entry->attr.index] = NULL;    in free_gid_entry_locked()
    257  write_unlock_irq(&table->rwlock);    in free_gid_entry_locked()
    286  struct ib_gid_table *table = rdma_gid_table(device, port_num);    in free_gid_work()  local
    288  mutex_lock(&table->lock);    in free_gid_work()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/ |
D | dev.c |
     29  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;    in mlx5_sf_dev_allocated()  local
     31  return table && !xa_empty(&table->devices);    in mlx5_sf_dev_allocated()
     74  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;    in mlx5_sf_dev_add()  local
    101  if (!table->max_sfs) {    in mlx5_sf_dev_add()
    107  sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);    in mlx5_sf_dev_add()
    122  err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);    in mlx5_sf_dev_add()
    136  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;    in mlx5_sf_dev_del()  local
    138  xa_erase(&table->devices, sf_index);    in mlx5_sf_dev_del()
    145  struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);    in mlx5_sf_dev_state_change_handler()  local
    152  max_functions = mlx5_sf_max_functions(table->dev);    in mlx5_sf_dev_state_change_handler()
    [all …]
|
/drivers/gpu/drm/i915/gt/ |
D | intel_mocs.c |
     24  const struct drm_i915_mocs_entry *table;    member
    339  struct drm_i915_mocs_table *table)    in get_mocs_settings()  argument
    344  table->size = ARRAY_SIZE(dg1_mocs_table);    in get_mocs_settings()
    345  table->table = dg1_mocs_table;    in get_mocs_settings()
    346  table->n_entries = GEN9_NUM_MOCS_ENTRIES;    in get_mocs_settings()
    348  table->size = ARRAY_SIZE(tgl_mocs_table);    in get_mocs_settings()
    349  table->table = tgl_mocs_table;    in get_mocs_settings()
    350  table->n_entries = GEN9_NUM_MOCS_ENTRIES;    in get_mocs_settings()
    352  table->size = ARRAY_SIZE(icl_mocs_table);    in get_mocs_settings()
    353  table->table = icl_mocs_table;    in get_mocs_settings()
    [all …]
|
/drivers/media/i2c/ |
D | ks0127.c |
    208  u8 *table = reg_defaults;    in init_reg_defaults()  local
    214  table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */    in init_reg_defaults()
    215  table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */    in init_reg_defaults()
    216  table[KS_CMDC] = 0x00; /* Test options */    in init_reg_defaults()
    218  table[KS_CMDD] = 0x01;    in init_reg_defaults()
    219  table[KS_HAVB] = 0x00; /* HAV Start Control */    in init_reg_defaults()
    220  table[KS_HAVE] = 0x00; /* HAV End Control */    in init_reg_defaults()
    221  table[KS_HS1B] = 0x10; /* HS1 Start Control */    in init_reg_defaults()
    222  table[KS_HS1E] = 0x00; /* HS1 End Control */    in init_reg_defaults()
    223  table[KS_HS2B] = 0x00; /* HS2 Start Control */    in init_reg_defaults()
    [all …]
|
/drivers/clk/ |
D | clk-divider.c |
     45  static unsigned int _get_table_maxdiv(const struct clk_div_table *table,    in _get_table_maxdiv()  argument
     51  for (clkt = table; clkt->div; clkt++)    in _get_table_maxdiv()
     57  static unsigned int _get_table_mindiv(const struct clk_div_table *table)    in _get_table_mindiv()  argument
     62  for (clkt = table; clkt->div; clkt++)    in _get_table_mindiv()
     68  static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,    in _get_maxdiv()  argument
     75  if (table)    in _get_maxdiv()
     76  return _get_table_maxdiv(table, width);    in _get_maxdiv()
     80  static unsigned int _get_table_div(const struct clk_div_table *table,    in _get_table_div()  argument
     85  for (clkt = table; clkt->div; clkt++)    in _get_table_div()
     91  static unsigned int _get_div(const struct clk_div_table *table,    in _get_div()  argument
     [all …]
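Note: the clk-divider.c helpers iterate a struct clk_div_table array terminated by an entry with .div == 0, which is why every loop above reads "for (clkt = table; clkt->div; clkt++)". A small hedged sketch of such a table and a max-divider scan; the register-value/divider pairs below are made up for illustration.

    #include <linux/clk-provider.h>

    /* register value -> divider mapping; the empty entry is the sentinel */
    static const struct clk_div_table example_div_table[] = {
            { .val = 0, .div = 1 },
            { .val = 1, .div = 2 },
            { .val = 2, .div = 4 },
            { .val = 3, .div = 8 },
            { /* sentinel: .div == 0 ends the walk */ }
    };

    static unsigned int example_table_maxdiv(const struct clk_div_table *table)
    {
            const struct clk_div_table *clkt;
            unsigned int maxdiv = 0;

            for (clkt = table; clkt->div; clkt++)   /* stop at the sentinel */
                    if (clkt->div > maxdiv)
                            maxdiv = clkt->div;
            return maxdiv;
    }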
|
/drivers/gpu/drm/amd/pm/powerplay/smumgr/ |
D | vegam_smumgr.c |
    450  SMU75_Discrete_DpmTable *table)    in vegam_populate_smc_mvdd_table()  argument
    460  table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(    in vegam_populate_smc_mvdd_table()
    463  table->SmioTable2.Pattern[level].Smio =    in vegam_populate_smc_mvdd_table()
    465  table->Smio[level] |=    in vegam_populate_smc_mvdd_table()
    468  table->SmioMask2 = data->mvdd_voltage_table.mask_low;    in vegam_populate_smc_mvdd_table()
    470  table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);    in vegam_populate_smc_mvdd_table()
    477  struct SMU75_Discrete_DpmTable *table)    in vegam_populate_smc_vddci_table()  argument
    488  table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(    in vegam_populate_smc_vddci_table()
    490  table->SmioTable1.Pattern[level].Smio = (uint8_t) level;    in vegam_populate_smc_vddci_table()
    492  table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;    in vegam_populate_smc_vddci_table()
    [all …]
|
D | polaris10_smumgr.c |
    432  SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);    in polaris10_populate_bapm_parameters_in_dpm_table()  local
    442  table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));    in polaris10_populate_bapm_parameters_in_dpm_table()
    443  table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));    in polaris10_populate_bapm_parameters_in_dpm_table()
    449  table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(    in polaris10_populate_bapm_parameters_in_dpm_table()
    451  table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(    in polaris10_populate_bapm_parameters_in_dpm_table()
    453  table->FanGainEdge = PP_HOST_TO_SMC_US(    in polaris10_populate_bapm_parameters_in_dpm_table()
    455  table->FanGainHotspot = PP_HOST_TO_SMC_US(    in polaris10_populate_bapm_parameters_in_dpm_table()
    464  table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);    in polaris10_populate_bapm_parameters_in_dpm_table()
    465  table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);    in polaris10_populate_bapm_parameters_in_dpm_table()
    478  SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);    in polaris10_populate_zero_rpm_parameters()  local
    [all …]
|
D | ci_smumgr.c |
    841  SMU7_Discrete_DpmTable *table)    in ci_populate_smc_vddc_table()  argument
    847  table->VddcLevelCount = data->vddc_voltage_table.count;    in ci_populate_smc_vddc_table()
    848  for (count = 0; count < table->VddcLevelCount; count++) {    in ci_populate_smc_vddc_table()
    851  &(table->VddcLevel[count]));    in ci_populate_smc_vddc_table()
    856  table->VddcLevel[count].Smio = (uint8_t) count;    in ci_populate_smc_vddc_table()
    857  table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;    in ci_populate_smc_vddc_table()
    858  table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;    in ci_populate_smc_vddc_table()
    860  table->VddcLevel[count].Smio = 0;    in ci_populate_smc_vddc_table()
    864  CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);    in ci_populate_smc_vddc_table()
    870  SMU7_Discrete_DpmTable *table)    in ci_populate_smc_vdd_ci_table()  argument
    [all …]
|
D | tonga_smumgr.c |
    303  SMU72_Discrete_DpmTable *table)    in tonga_populate_smc_vddc_table()  argument
    309  table->VddcLevelCount = data->vddc_voltage_table.count;    in tonga_populate_smc_vddc_table()
    310  for (count = 0; count < table->VddcLevelCount; count++) {    in tonga_populate_smc_vddc_table()
    311  table->VddcTable[count] =    in tonga_populate_smc_vddc_table()
    314  CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);    in tonga_populate_smc_vddc_table()
    320  SMU72_Discrete_DpmTable *table)    in tonga_populate_smc_vdd_gfx_table()  argument
    326  table->VddGfxLevelCount = data->vddgfx_voltage_table.count;    in tonga_populate_smc_vdd_gfx_table()
    328  table->VddGfxTable[count] =    in tonga_populate_smc_vdd_gfx_table()
    331  CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);    in tonga_populate_smc_vdd_gfx_table()
    337  SMU72_Discrete_DpmTable *table)    in tonga_populate_smc_vdd_ci_table()  argument
    [all …]
|
D | iceland_smumgr.c |
    618  SMU71_Discrete_DpmTable *table)    in iceland_populate_smc_vddc_table()  argument
    624  table->VddcLevelCount = data->vddc_voltage_table.count;    in iceland_populate_smc_vddc_table()
    625  for (count = 0; count < table->VddcLevelCount; count++) {    in iceland_populate_smc_vddc_table()
    628  &(table->VddcLevel[count]));    in iceland_populate_smc_vddc_table()
    633  table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;    in iceland_populate_smc_vddc_table()
    635  table->VddcLevel[count].Smio = 0;    in iceland_populate_smc_vddc_table()
    638  CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);    in iceland_populate_smc_vddc_table()
    644  SMU71_Discrete_DpmTable *table)    in iceland_populate_smc_vdd_ci_table()  argument
    650  table->VddciLevelCount = data->vddci_voltage_table.count;    in iceland_populate_smc_vdd_ci_table()
    652  for (count = 0; count < table->VddciLevelCount; count++) {    in iceland_populate_smc_vdd_ci_table()
    [all …]
|
D | fiji_smumgr.c |
    755  struct SMU73_Discrete_DpmTable *table)    in fiji_populate_cac_table()  argument
    773  table->BapmVddcVidLoSidd[count] =    in fiji_populate_cac_table()
    775  table->BapmVddcVidHiSidd[count] =    in fiji_populate_cac_table()
    783  struct SMU73_Discrete_DpmTable *table)    in fiji_populate_smc_voltage_tables()  argument
    787  result = fiji_populate_cac_table(hwmgr, table);    in fiji_populate_smc_voltage_tables()
    821  struct SMU73_Discrete_DpmTable *table)    in fiji_populate_ulv_state()  argument
    823  return fiji_populate_ulv_level(hwmgr, &table->Ulv);    in fiji_populate_ulv_state()
    827  struct SMU73_Discrete_DpmTable *table)    in fiji_populate_smc_link_level()  argument
    837  table->LinkLevel[i].PcieGenSpeed =    in fiji_populate_smc_link_level()
    839  table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(    in fiji_populate_smc_link_level()
    [all …]
|
/drivers/net/ethernet/sfc/ |
D | mcdi_filters.c |
     27  efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,    in efx_mcdi_filter_entry_spec()  argument
     30  return (struct efx_filter_spec *)(table->entry[filter_idx].spec &    in efx_mcdi_filter_entry_spec()
     35  efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,    in efx_mcdi_filter_entry_flags()  argument
     38  return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;    in efx_mcdi_filter_entry_flags()
     84  efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,    in efx_mcdi_filter_set_entry()  argument
     89  table->entry[filter_idx].spec = (unsigned long)spec | flags;    in efx_mcdi_filter_set_entry()
    325  static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,    in efx_mcdi_filter_pri()  argument
    332  match_pri < table->rx_match_count;    in efx_mcdi_filter_pri()
    334  if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)    in efx_mcdi_filter_pri()
    345  struct efx_mcdi_filter_table *table;    in efx_mcdi_filter_insert_locked()  local
    [all …]
|
D | farch.c |
   1882  struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];    member
   1887  struct efx_farch_filter_table *table,
   1936  struct efx_farch_filter_table *table;    in efx_farch_filter_push_rx_config()  local
   1941  table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];    in efx_farch_filter_push_rx_config()
   1943  table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +    in efx_farch_filter_push_rx_config()
   1946  table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +    in efx_farch_filter_push_rx_config()
   1949  table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +    in efx_farch_filter_push_rx_config()
   1952  table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +    in efx_farch_filter_push_rx_config()
   1955  table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];    in efx_farch_filter_push_rx_config()
   1956  if (table->size) {    in efx_farch_filter_push_rx_config()
   [all …]
|
/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c |
    222  int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)    in mthca_table_get()  argument
    224  int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;    in mthca_table_get()
    227  mutex_lock(&table->mutex);    in mthca_table_get()
    229  if (table->icm[i]) {    in mthca_table_get()
    230  ++table->icm[i]->refcount;    in mthca_table_get()
    234  table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,    in mthca_table_get()
    235  (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |    in mthca_table_get()
    236  __GFP_NOWARN, table->coherent);    in mthca_table_get()
    237  if (!table->icm[i]) {    in mthca_table_get()
    242  if (mthca_MAP_ICM(dev, table->icm[i],    in mthca_table_get()
    [all …]
|
/drivers/gpu/drm/radeon/ |
D | rv730_dpm.c |
    227  RV770_SMC_STATETABLE *table)    in rv730_populate_smc_acpi_state()  argument
    239  table->ACPIState = table->initialState;    in rv730_populate_smc_acpi_state()
    240  table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;    in rv730_populate_smc_acpi_state()
    244  &table->ACPIState.levels[0].vddc);    in rv730_populate_smc_acpi_state()
    245  table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ?    in rv730_populate_smc_acpi_state()
    247  table->ACPIState.levels[0].gen2XSP =    in rv730_populate_smc_acpi_state()
    251  &table->ACPIState.levels[0].vddc);    in rv730_populate_smc_acpi_state()
    252  table->ACPIState.levels[0].gen2PCIE = 0;    in rv730_populate_smc_acpi_state()
    294  table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);    in rv730_populate_smc_acpi_state()
    295  table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);    in rv730_populate_smc_acpi_state()
    [all …]
|
/drivers/acpi/ |
D | tables.c |
    424  struct acpi_table_header *table = NULL;    in acpi_table_parse()  local
    433  acpi_get_table(id, acpi_apic_instance, &table);    in acpi_table_parse()
    435  acpi_get_table(id, 0, &table);    in acpi_table_parse()
    437  if (table) {    in acpi_table_parse()
    438  handler(table);    in acpi_table_parse()
    439  acpi_put_table(table);    in acpi_table_parse()
    452  struct acpi_table_header *table = NULL;    in check_multiple_madt()  local
    454  acpi_get_table(ACPI_SIG_MADT, 2, &table);    in check_multiple_madt()
    455  if (table) {    in check_multiple_madt()
    461  acpi_put_table(table);    in check_multiple_madt()
    [all …]
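Note: the tables.c hits pair acpi_get_table() with acpi_put_table() around a handler callback. A minimal hedged sketch of that get/handle/put pattern; the handler and function names are placeholders, not kernel code.

    #include <linux/kernel.h>
    #include <linux/acpi.h>

    static void example_madt_handler(struct acpi_table_header *table)
    {
            pr_info("MADT revision %u, length %u\n",
                    table->revision, table->length);
    }

    static int example_parse_madt(void)
    {
            struct acpi_table_header *table = NULL;

            /* instance 0: first MADT in the system */
            if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &table)) || !table)
                    return -ENODEV;

            example_madt_handler(table);
            acpi_put_table(table);  /* drop the mapping taken by acpi_get_table() */
            return 0;
    }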
|
/drivers/net/ethernet/sfc/falcon/ |
D | farch.c |
   1814  struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];    member
   1819  struct ef4_farch_filter_table *table,
   1868  struct ef4_farch_filter_table *table;    in ef4_farch_filter_push_rx_config()  local
   1873  table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];    in ef4_farch_filter_push_rx_config()
   1875  table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +    in ef4_farch_filter_push_rx_config()
   1878  table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +    in ef4_farch_filter_push_rx_config()
   1881  table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +    in ef4_farch_filter_push_rx_config()
   1884  table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +    in ef4_farch_filter_push_rx_config()
   1887  table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];    in ef4_farch_filter_push_rx_config()
   1888  if (table->size) {    in ef4_farch_filter_push_rx_config()
   [all …]
|
/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ |
D | smu_helper.c |
     53  uint32_t *table;    in phm_copy_clock_limits_array()  local
     56  table = kzalloc(array_size, GFP_KERNEL);    in phm_copy_clock_limits_array()
     57  if (NULL == table)    in phm_copy_clock_limits_array()
     61  table[i] = le32_to_cpu(pptable_array[i]);    in phm_copy_clock_limits_array()
     63  *pptable_info_array = table;    in phm_copy_clock_limits_array()
     75  uint32_t *table;    in phm_copy_overdrive_settings_limits_array()  local
     78  table = kzalloc(array_size, GFP_KERNEL);    in phm_copy_overdrive_settings_limits_array()
     79  if (NULL == table)    in phm_copy_overdrive_settings_limits_array()
     83  table[i] = le32_to_cpu(pptable_array[i]);    in phm_copy_overdrive_settings_limits_array()
     85  *pptable_info_array = table;    in phm_copy_overdrive_settings_limits_array()
     [all …]
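Note: both smu_helper.c hits copy a little-endian table from the BIOS powerplay blob into a freshly allocated host-endian array with le32_to_cpu(). A short hedged sketch of that conversion loop; the function and parameter names are illustrative, not the hwmgr API.

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Copy 'count' little-endian u32 values into a newly allocated
     * host-endian array; the caller owns (and later kfree()s) *out. */
    static int copy_le32_array(uint32_t **out, const __le32 *src, u32 count)
    {
            uint32_t *table;
            u32 i;

            table = kcalloc(count, sizeof(*table), GFP_KERNEL);
            if (!table)
                    return -ENOMEM;

            for (i = 0; i < count; i++)
                    table[i] = le32_to_cpu(src[i]);

            *out = table;
            return 0;
    }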
|