/drivers/acpi/

nvs.c
     95  struct nvs_page *entry, *next;  in suspend_nvs_register() local
    103  entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);  in suspend_nvs_register()
    104  if (!entry)  in suspend_nvs_register()
    107  list_add_tail(&entry->node, &nvs_list);  in suspend_nvs_register()
    108  entry->phys_start = start;  in suspend_nvs_register()
    110  entry->size = (size < nr_bytes) ? size : nr_bytes;  in suspend_nvs_register()
    112  start += entry->size;  in suspend_nvs_register()
    113  size -= entry->size;  in suspend_nvs_register()
    118  list_for_each_entry_safe(entry, next, &nvs_list, node) {  in suspend_nvs_register()
    119  list_del(&entry->node);  in suspend_nvs_register()
    [all …]
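The nvs.c hits above are all one register-and-unwind pattern: split the NVS region into page-sized chunks, track each chunk with a kzalloc()'d node on a global list, and on allocation failure walk the list back with list_for_each_entry_safe(). A minimal sketch of that pattern, with struct nvs_page cut down to the fields visible in the excerpt and the chunk-size arithmetic assumed:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Reduced nvs_page: the real struct carries more fields than shown here. */
struct nvs_page {
    unsigned long phys_start;
    unsigned int size;
    struct list_head node;
};

static LIST_HEAD(nvs_list);

static int nvs_register_sketch(unsigned long start, unsigned long size)
{
    struct nvs_page *entry, *next;

    while (size > 0) {
        /* Assumed: each node covers at most the remainder of its page. */
        unsigned int nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);

        entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
        if (!entry)
            goto error;

        list_add_tail(&entry->node, &nvs_list);
        entry->phys_start = start;
        entry->size = (size < nr_bytes) ? size : nr_bytes;

        start += entry->size;
        size -= entry->size;
    }
    return 0;

error:
    /* Unwind every node registered so far before reporting failure. */
    list_for_each_entry_safe(entry, next, &nvs_list, node) {
        list_del(&entry->node);
        kfree(entry);
    }
    return -ENOMEM;
}
```

Linking each node into the list before it is fully filled in is what makes the single error path work: whatever was built so far is reachable from nvs_list and can be torn down uniformly.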

pci_irq.c
    112  static void do_prt_fixups(struct acpi_prt_entry *entry,  in do_prt_fixups() argument
    123  entry->id.segment == quirk->segment &&  in do_prt_fixups()
    124  entry->id.bus == quirk->bus &&  in do_prt_fixups()
    125  entry->id.device == quirk->device &&  in do_prt_fixups()
    126  entry->pin == quirk->pin &&  in do_prt_fixups()
    132  entry->id.segment, entry->id.bus,  in do_prt_fixups()
    133  entry->id.device, pin_name(entry->pin),  in do_prt_fixups()
    147  struct acpi_prt_entry *entry;  in acpi_pci_irq_check_entry() local
    153  entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);  in acpi_pci_irq_check_entry()
    154  if (!entry)  in acpi_pci_irq_check_entry()
    [all …]
/drivers/firmware/

memmap.c
     43  static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
     44  static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
     45  static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
     56  ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
     99  struct firmware_map_entry *entry = to_memmap_entry(kobj);  in release_firmware_map_entry() local
    101  if (PageReserved(virt_to_page(entry))) {  in release_firmware_map_entry()
    109  list_add(&entry->list, &map_entries_bootmem);  in release_firmware_map_entry()
    115  kfree(entry);  in release_firmware_map_entry()
    143  struct firmware_map_entry *entry)  in firmware_map_add_entry() argument
    147  entry->start = start;  in firmware_map_add_entry()
    [all …]

dmi-sysfs.c
     54  ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
     69  ssize_t (*show)(struct dmi_sysfs_entry *entry,
    101  struct dmi_sysfs_entry *entry = to_entry(kobj);  in dmi_sysfs_attr_show() local
    108  return attr->show(entry, buf);  in dmi_sysfs_attr_show()
    119  struct dmi_sysfs_entry *entry;  member
    130  struct dmi_sysfs_entry *entry = data->entry;  in find_dmi_entry_helper() local
    133  if (dh->type != entry->dh.type)  in find_dmi_entry_helper()
    149  data->ret = data->callback(entry, dh, data->private);  in find_dmi_entry_helper()
    159  static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,  in find_dmi_entry() argument
    163  .entry = entry,  in find_dmi_entry()
    [all …]
/drivers/staging/media/atomisp/pci/base/refcount/src/

refcount.c
     98  struct ia_css_refcount_entry *entry;  in ia_css_refcount_uninit() local
    108  entry = myrefcount.items + i;  in ia_css_refcount_uninit()
    109  if (entry->data != mmgr_NULL) {  in ia_css_refcount_uninit()
    113  hmm_free(entry->data);  in ia_css_refcount_uninit()
    114  entry->data = mmgr_NULL;  in ia_css_refcount_uninit()
    115  entry->count = 0;  in ia_css_refcount_uninit()
    116  entry->id = 0;  in ia_css_refcount_uninit()
    128  struct ia_css_refcount_entry *entry;  in ia_css_refcount_increment() local
    133  entry = refcount_find_entry(ptr, false);  in ia_css_refcount_increment()
    138  if (!entry) {  in ia_css_refcount_increment()
    [all …]

/drivers/gpu/drm/

drm_scatter.c
     46  static void drm_sg_cleanup(struct drm_sg_mem * entry)  in drm_sg_cleanup() argument
     51  for (i = 0; i < entry->pages; i++) {  in drm_sg_cleanup()
     52  page = entry->pagelist[i];  in drm_sg_cleanup()
     57  vfree(entry->virtual);  in drm_sg_cleanup()
     59  kfree(entry->busaddr);  in drm_sg_cleanup()
     60  kfree(entry->pagelist);  in drm_sg_cleanup()
     61  kfree(entry);  in drm_sg_cleanup()
     82  struct drm_sg_mem *entry;  in drm_legacy_sg_alloc() local
     99  entry = kzalloc(sizeof(*entry), GFP_KERNEL);  in drm_legacy_sg_alloc()
    100  if (!entry)  in drm_legacy_sg_alloc()
    [all …]
/drivers/staging/media/tegra-vde/

dmabuf-cache.c
     29  static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)  in tegra_vde_release_entry() argument
     31  struct dma_buf *dmabuf = entry->a->dmabuf;  in tegra_vde_release_entry()
     33  WARN_ON_ONCE(entry->refcnt);  in tegra_vde_release_entry()
     35  if (entry->vde->domain)  in tegra_vde_release_entry()
     36  tegra_vde_iommu_unmap(entry->vde, entry->iova);  in tegra_vde_release_entry()
     38  dma_buf_unmap_attachment(entry->a, entry->sgt, entry->dma_dir);  in tegra_vde_release_entry()
     39  dma_buf_detach(dmabuf, entry->a);  in tegra_vde_release_entry()
     42  list_del(&entry->list);  in tegra_vde_release_entry()
     43  kfree(entry);  in tegra_vde_release_entry()
     48  struct tegra_vde_cache_entry *entry;  in tegra_vde_delayed_unmap() local
    [all …]
/drivers/net/dsa/mv88e6xxx/

global1_vtu.c
     20  struct mv88e6xxx_vtu_entry *entry)  in mv88e6xxx_g1_vtu_fid_read() argument
     29  entry->fid = val & MV88E6352_G1_VTU_FID_MASK;  in mv88e6xxx_g1_vtu_fid_read()
     35  struct mv88e6xxx_vtu_entry *entry)  in mv88e6xxx_g1_vtu_fid_write() argument
     37  u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;  in mv88e6xxx_g1_vtu_fid_write()
     45  struct mv88e6xxx_vtu_entry *entry)  in mv88e6xxx_g1_vtu_sid_read() argument
     54  entry->sid = val & MV88E6352_G1_VTU_SID_MASK;  in mv88e6xxx_g1_vtu_sid_read()
     60  struct mv88e6xxx_vtu_entry *entry)  in mv88e6xxx_g1_vtu_sid_write() argument
     62  u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;  in mv88e6xxx_g1_vtu_sid_write()
     91  struct mv88e6xxx_vtu_entry *entry)  in mv88e6xxx_g1_vtu_vid_read() argument
    100  entry->vid = val & 0xfff;  in mv88e6xxx_g1_vtu_vid_read()
    [all …]
/drivers/gpu/drm/amd/pm/powerplay/smumgr/

vega10_smumgr.c
     46  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega10_copy_table_from_smc()
     48  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega10_copy_table_from_smc()
     52  upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega10_copy_table_from_smc()
     56  lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega10_copy_table_from_smc()
     60  priv->smu_tables.entry[table_id].table_id,  in vega10_copy_table_from_smc()
     66  memcpy(table, priv->smu_tables.entry[table_id].table,  in vega10_copy_table_from_smc()
     67  priv->smu_tables.entry[table_id].size);  in vega10_copy_table_from_smc()
     86  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega10_copy_table_to_smc()
     88  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega10_copy_table_to_smc()
     91  memcpy(priv->smu_tables.entry[table_id].table, table,  in vega10_copy_table_to_smc()
    [all …]

vega12_smumgr.c
     49  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega12_copy_table_from_smc()
     51  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega12_copy_table_from_smc()
     55  upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega12_copy_table_from_smc()
     60  lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega12_copy_table_from_smc()
     74  memcpy(table, priv->smu_tables.entry[table_id].table,  in vega12_copy_table_from_smc()
     75  priv->smu_tables.entry[table_id].size);  in vega12_copy_table_from_smc()
     94  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega12_copy_table_to_smc()
     96  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega12_copy_table_to_smc()
     99  memcpy(priv->smu_tables.entry[table_id].table, table,  in vega12_copy_table_to_smc()
    100  priv->smu_tables.entry[table_id].size);  in vega12_copy_table_to_smc()
    [all …]

vega20_smumgr.c
    173  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega20_copy_table_from_smc()
    175  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega20_copy_table_from_smc()
    180  upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega20_copy_table_from_smc()
    186  lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),  in vega20_copy_table_from_smc()
    198  memcpy(table, priv->smu_tables.entry[table_id].table,  in vega20_copy_table_from_smc()
    199  priv->smu_tables.entry[table_id].size);  in vega20_copy_table_from_smc()
    219  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,  in vega20_copy_table_to_smc()
    221  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,  in vega20_copy_table_to_smc()
    224  memcpy(priv->smu_tables.entry[table_id].table, table,  in vega20_copy_table_to_smc()
    225  priv->smu_tables.entry[table_id].size);  in vega20_copy_table_to_smc()
    [all …]
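All three vega*_smumgr.c files repeat the same table exchange: check that the cached table entry has a non-zero version and size, hand the 64-bit GPU address of the shared buffer to the firmware as upper/lower 32-bit halves, let the firmware DMA the table, then memcpy() the CPU-visible copy. A hedged sketch of the "copy from SMC" direction only; send_msg() and the three message IDs are hypothetical stand-ins for the driver's real SMU messaging helpers, not the actual API:

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Only the fields the excerpts reference; the real entry has more. */
struct smu_table_entry_sketch {
    u32 version;
    u32 size;
    u64 mc_addr;   /* GPU-visible address of the shared table buffer */
    void *table;   /* CPU-visible mapping of the same buffer */
};

static int copy_table_from_smc_sketch(struct smu_table_entry_sketch *entry,
                                      void *dst, u32 msg_addr_high,
                                      u32 msg_addr_low, u32 msg_transfer,
                                      u32 table_id,
                                      int (*send_msg)(u32 msg, u32 param))
{
    if (!entry->version || !entry->size)
        return -EINVAL;

    /* Tell the firmware where the shared buffer lives (two 32-bit halves). */
    if (send_msg(msg_addr_high, upper_32_bits(entry->mc_addr)) ||
        send_msg(msg_addr_low, lower_32_bits(entry->mc_addr)))
        return -EIO;

    /* Ask the firmware to DMA table `table_id` into that buffer. */
    if (send_msg(msg_transfer, table_id))
        return -EIO;

    /* The buffer now holds the table; copy it out for the caller. */
    memcpy(dst, entry->table, entry->size);
    return 0;
}
```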

/drivers/net/dsa/sja1105/

sja1105_static_config.c
     98  struct sja1105_avb_params_entry *entry = entry_ptr;  in sja1105et_avb_params_entry_packing() local
    100  sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);  in sja1105et_avb_params_entry_packing()
    101  sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);  in sja1105et_avb_params_entry_packing()
    109  struct sja1105_avb_params_entry *entry = entry_ptr;  in sja1105pqrs_avb_params_entry_packing() local
    111  sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);  in sja1105pqrs_avb_params_entry_packing()
    112  sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);  in sja1105pqrs_avb_params_entry_packing()
    113  sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);  in sja1105pqrs_avb_params_entry_packing()
    121  struct sja1105_general_params_entry *entry = entry_ptr;  in sja1105et_general_params_entry_packing() local
    123  sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);  in sja1105et_general_params_entry_packing()
    124  sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);  in sja1105et_general_params_entry_packing()
    [all …]
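Each sja1105_packing() call above maps one named field of the entry onto a fixed [start..end] bit range of the on-wire entry buffer, in either the pack or the unpack direction. A self-contained illustration of the pack direction only, assuming the plain "buffer as one big-endian integer, bit 0 = LSB of the last byte" numbering; the real helper goes through lib/packing and may apply additional word-ordering quirks that this sketch ignores:

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Write bits [end..start] of a buffer viewed as a single big-endian
 * integer of 8 * len bits, where bit 0 is the least significant bit of
 * buf[len - 1]. Error handling and the unpack direction are omitted.
 */
static void pack_field(uint8_t *buf, uint64_t val, int start, int end,
                       size_t len)
{
    for (int i = end; i <= start; i++) {
        size_t byte = len - 1 - i / 8;
        unsigned int bit = i % 8;

        if (val & (1ULL << (i - end)))
            buf[byte] |= 1u << bit;
        else
            buf[byte] &= (uint8_t)~(1u << bit);
    }
}

/*
 * Mirrors the shape of the ET AVB params packing above: a 12-byte
 * (96-bit) entry with destmeta in bits 95..48 and srcmeta in 47..0.
 */
static void avb_params_pack_sketch(uint8_t buf[12], uint64_t destmeta,
                                   uint64_t srcmeta)
{
    pack_field(buf, destmeta, 95, 48, 12);
    pack_field(buf, srcmeta, 47, 0, 12);
}
```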

/drivers/infiniband/core/

ib_core_uverbs.c
     32  struct rdma_user_mmap_entry *entry)  in rdma_umap_priv_init() argument
     37  if (entry) {  in rdma_umap_priv_init()
     38  kref_get(&entry->ref);  in rdma_umap_priv_init()
     39  priv->entry = entry;  in rdma_umap_priv_init()
     69  struct rdma_user_mmap_entry *entry)  in rdma_user_mmap_io() argument
     96  rdma_umap_priv_init(priv, vma, entry);  in rdma_user_mmap_io()
    120  struct rdma_user_mmap_entry *entry;  in rdma_user_mmap_entry_get_pgoff() local
    127  entry = xa_load(&ucontext->mmap_xa, pgoff);  in rdma_user_mmap_entry_get_pgoff()
    134  if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||  in rdma_user_mmap_entry_get_pgoff()
    135  !kref_get_unless_zero(&entry->ref))  in rdma_user_mmap_entry_get_pgoff()
    [all …]
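The rdma_user_mmap_entry_get_pgoff() hits show the lookup side of this API: find the entry by page offset in an XArray, reject it if the offset does not match its recorded start or the driver already removed it, and hand it out only if a reference can still be taken with kref_get_unless_zero(). A reduced sketch of that lookup, with the entry cut down to the fields used above and the XArray passed in directly instead of coming from a ucontext:

```c
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Reduced rdma_user_mmap_entry: only the fields used in the excerpt. */
struct mmap_entry_sketch {
    unsigned long start_pgoff;
    bool driver_removed;
    struct kref ref;
};

static struct mmap_entry_sketch *
mmap_entry_get_sketch(struct xarray *mmap_xa, unsigned long pgoff)
{
    struct mmap_entry_sketch *entry;

    xa_lock(mmap_xa);
    entry = xa_load(mmap_xa, pgoff);

    /*
     * Reject stale or mismatched entries, and return one only if its
     * refcount has not already dropped to zero.
     */
    if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
        !kref_get_unless_zero(&entry->ref))
        entry = NULL;
    xa_unlock(mmap_xa);

    return entry;
}
```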

/drivers/gpu/drm/vmwgfx/

vmwgfx_cmdbuf_res.c
    105  struct vmw_cmdbuf_res *entry)  in vmw_cmdbuf_res_free() argument
    107  list_del(&entry->head);  in vmw_cmdbuf_res_free()
    108  WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));  in vmw_cmdbuf_res_free()
    109  vmw_resource_unreference(&entry->res);  in vmw_cmdbuf_res_free()
    110  kfree(entry);  in vmw_cmdbuf_res_free()
    125  struct vmw_cmdbuf_res *entry, *next;  in vmw_cmdbuf_res_commit() local
    127  list_for_each_entry_safe(entry, next, list, head) {  in vmw_cmdbuf_res_commit()
    128  list_del(&entry->head);  in vmw_cmdbuf_res_commit()
    129  if (entry->res->func->commit_notify)  in vmw_cmdbuf_res_commit()
    130  entry->res->func->commit_notify(entry->res,  in vmw_cmdbuf_res_commit()
    [all …]

/drivers/net/ethernet/rocker/

rocker_ofdpa.c
     93  struct hlist_node entry;  member
    103  struct hlist_node entry;  member
    129  struct hlist_node entry;  member
    141  struct hlist_node entry;  member
    148  struct hlist_node entry;  member
    306  const struct ofdpa_flow_tbl_entry *entry)  in ofdpa_cmd_flow_tbl_add_ig_port() argument
    309  entry->key.ig_port.in_pport))  in ofdpa_cmd_flow_tbl_add_ig_port()
    312  entry->key.ig_port.in_pport_mask))  in ofdpa_cmd_flow_tbl_add_ig_port()
    315  entry->key.ig_port.goto_tbl))  in ofdpa_cmd_flow_tbl_add_ig_port()
    323  const struct ofdpa_flow_tbl_entry *entry)  in ofdpa_cmd_flow_tbl_add_vlan() argument
    [all …]

/drivers/misc/vmw_vmci/

vmci_queue_pair.c
    805  struct qp_entry *entry;  in qp_list_find() local
    810  list_for_each_entry(entry, &qp_list->head, list_item) {  in qp_list_find()
    811  if (vmci_handle_is_equal(entry->handle, handle))  in qp_list_find()
    812  return entry;  in qp_list_find()
    824  struct qp_guest_endpoint *entry;  in qp_guest_handle_to_entry() local
    827  entry = qp ? container_of(  in qp_guest_handle_to_entry()
    829  return entry;  in qp_guest_handle_to_entry()
    838  struct qp_broker_entry *entry;  in qp_broker_handle_to_entry() local
    841  entry = qp ? container_of(  in qp_broker_handle_to_entry()
    843  return entry;  in qp_broker_handle_to_entry()
    [all …]
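The queue-pair lists keep a common struct qp_entry embedded in both the guest and broker entry types: qp_list_find() matches on the handle, and the *_handle_to_entry() wrappers downcast the result with container_of(), NULL-safe via the ternary. A minimal sketch of that scheme with only the fields visible above; vmci_handle and vmci_handle_is_equal() come from <linux/vmw_vmci_defs.h>:

```c
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/vmw_vmci_defs.h>

/* Common base embedded in every queue-pair entry type. */
struct qp_entry_sketch {
    struct list_head list_item;
    struct vmci_handle handle;
};

/* One of the derived types; its extra state is elided here. */
struct qp_guest_endpoint_sketch {
    struct qp_entry_sketch qp;
};

static struct qp_entry_sketch *qp_list_find_sketch(struct list_head *head,
                                                   struct vmci_handle handle)
{
    struct qp_entry_sketch *entry;

    list_for_each_entry(entry, head, list_item)
        if (vmci_handle_is_equal(entry->handle, handle))
            return entry;

    return NULL;
}

static struct qp_guest_endpoint_sketch *
qp_guest_handle_to_entry_sketch(struct list_head *head,
                                struct vmci_handle handle)
{
    struct qp_entry_sketch *qp = qp_list_find_sketch(head, handle);

    /* NULL-safe downcast from the embedded base to the derived type. */
    return qp ? container_of(qp, struct qp_guest_endpoint_sketch, qp) : NULL;
}
```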

/drivers/isdn/mISDN/

dsp_pipeline.c
     62  struct dsp_element_entry *entry =  in mISDN_dsp_dev_release() local
     64  list_del(&entry->list);  in mISDN_dsp_dev_release()
     65  kfree(entry);  in mISDN_dsp_dev_release()
     70  struct dsp_element_entry *entry;  in mISDN_dsp_element_register() local
     76  entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);  in mISDN_dsp_element_register()
     77  if (!entry)  in mISDN_dsp_element_register()
     80  INIT_LIST_HEAD(&entry->list);  in mISDN_dsp_element_register()
     81  entry->elem = elem;  in mISDN_dsp_element_register()
     83  entry->dev.class = elements_class;  in mISDN_dsp_element_register()
     84  entry->dev.release = mISDN_dsp_dev_release;  in mISDN_dsp_element_register()
    [all …]

/drivers/hid/

hid-lg4ff.c
    303  struct lg4ff_device_entry *entry = drv_data->device_props;  in lg4ff_adjust_input_event() local
    306  if (!entry) {  in lg4ff_adjust_input_event()
    311  switch (entry->wdata.product_id) {  in lg4ff_adjust_input_event()
    315  new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);  in lg4ff_adjust_input_event()
    330  struct lg4ff_device_entry *entry = drv_data->device_props;  in lg4ff_raw_event() local
    332  if (!entry)  in lg4ff_raw_event()
    336  if (entry->wdata.combine) {  in lg4ff_raw_event()
    337  switch (entry->wdata.product_id) {  in lg4ff_raw_event()
    409  struct lg4ff_device_entry *entry;  in lg4ff_play() local
    421  entry = drv_data->device_props;  in lg4ff_play()
    [all …]

/drivers/pci/

msi.c
     74  struct msi_desc *entry;  in arch_setup_msi_irqs() local
     84  for_each_pci_msi_entry(entry, dev) {  in arch_setup_msi_irqs()
     85  ret = arch_setup_msi_irq(dev, entry);  in arch_setup_msi_irqs()
     98  struct msi_desc *entry;  in arch_teardown_msi_irqs() local
    100  for_each_pci_msi_entry(entry, dev)  in arch_teardown_msi_irqs()
    101  if (entry->irq)  in arch_teardown_msi_irqs()
    102  for (i = 0; i < entry->nvec_used; i++)  in arch_teardown_msi_irqs()
    103  arch_teardown_msi_irq(entry->irq + i);  in arch_teardown_msi_irqs()
    109  struct msi_desc *entry;  in default_restore_msi_irq() local
    111  entry = NULL;  in default_restore_msi_irq()
    [all …]

/drivers/net/ethernet/marvell/mvpp2/

mvpp2_debugfs.c
     60  struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;  in mvpp2_dbgfs_flow_flt_hits_show() local
     62  u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);  in mvpp2_dbgfs_flow_flt_hits_show()
     73  struct mvpp2_dbgfs_flow_entry *entry = s->private;  in mvpp2_dbgfs_flow_dec_hits_show() local
     75  u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);  in mvpp2_dbgfs_flow_dec_hits_show()
     86  struct mvpp2_dbgfs_flow_entry *entry = s->private;  in mvpp2_dbgfs_flow_type_show() local
     90  f = mvpp2_cls_flow_get(entry->flow);  in mvpp2_dbgfs_flow_type_show()
    126  const struct mvpp2_dbgfs_flow_entry *entry = s->private;  in mvpp2_dbgfs_flow_id_show() local
    129  f = mvpp2_cls_flow_get(entry->flow);  in mvpp2_dbgfs_flow_id_show()
    142  struct mvpp2_dbgfs_port_flow_entry *entry = s->private;  in mvpp2_dbgfs_port_flow_hash_opt_show() local
    143  struct mvpp2_port *port = entry->port;  in mvpp2_dbgfs_port_flow_hash_opt_show()
    [all …]

/drivers/gpu/drm/amd/amdkfd/

soc15_int.h
     35  #define SOC15_CLIENT_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) & 0xff)  argument
     36  #define SOC15_SOURCE_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 8 & 0xff)  argument
     37  #define SOC15_RING_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 16 & 0xff)  argument
     38  #define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf)  argument
     39  #define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1)  argument
     40  #define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff)  argument
     41  #define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4]))  argument
     42  #define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5]))  argument
     43  #define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6]))  argument
     44  #define SOC15_CONTEXT_ID3_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[7]))  argument
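Here `entry` is not a struct but a raw interrupt-handler (IH) ring entry: an array of little-endian 32-bit words, with the client, source, ring and VMID packed into dword 0, the PASID in dword 3 and four context IDs in dwords 4-7. A small usage sketch assuming only the macros above; how the decoded fields are then dispatched is up to the caller:

```c
#include <linux/kernel.h>

#include "soc15_int.h"

/* Decode the fixed fields of one raw 8-dword IH ring entry. */
static void decode_ih_entry_sketch(const uint32_t *ih_ring_entry)
{
    unsigned int client = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
    unsigned int source = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
    unsigned int ring = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
    unsigned int vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
    unsigned int vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
    unsigned int pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);

    pr_debug("IH entry: client %u source %u ring %u vmid %u (type %u) pasid %u\n",
             client, source, ring, vmid, vmid_type, pasid);
}
```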

/drivers/sh/intc/

virq.c
     27  #define for_each_virq(entry, head) \  argument
     28  for (entry = head; entry; entry = entry->next)
     86  struct intc_virq_list *entry;  in add_virq_to_pirq() local
     90  for_each_virq(entry, irq_get_handler_data(irq)) {  in add_virq_to_pirq()
     91  if (entry->irq == virq)  in add_virq_to_pirq()
     93  last = &entry->next;  in add_virq_to_pirq()
     96  entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);  in add_virq_to_pirq()
     97  if (!entry)  in add_virq_to_pirq()
    100  entry->irq = virq;  in add_virq_to_pirq()
    103  *last = entry;  in add_virq_to_pirq()
    [all …]
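add_virq_to_pirq() keeps a singly linked intc_virq_list per physical IRQ: it walks the list with for_each_virq(), bails out if the virq is already present, remembers the address of the last next pointer, and splices the new node in through it. A reduced sketch of that append; the real code stores the list head in the IRQ's handler data rather than taking a head pointer parameter:

```c
#include <linux/errno.h>
#include <linux/slab.h>

struct intc_virq_list_sketch {
    unsigned int irq;
    struct intc_virq_list_sketch *next;
};

#define for_each_virq_sketch(entry, head) \
    for (entry = head; entry; entry = entry->next)

static int add_virq_sketch(struct intc_virq_list_sketch **head,
                           unsigned int virq)
{
    struct intc_virq_list_sketch **last = head;
    struct intc_virq_list_sketch *entry;

    /* Nothing to do if the virq is already linked; otherwise find the tail. */
    for_each_virq_sketch(entry, *head) {
        if (entry->irq == virq)
            return 0;
        last = &entry->next;
    }

    entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
    if (!entry)
        return -ENOMEM;

    entry->irq = virq;
    *last = entry;    /* either the head itself or the tail's next pointer */

    return 0;
}
```

Tracking `last` as a pointer-to-pointer removes the usual empty-list special case: appending to an empty list and appending after the tail are the same store.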

/drivers/infiniband/hw/hfi1/

affinity.c
    227  static void node_affinity_destroy(struct hfi1_affinity_node *entry)  in node_affinity_destroy() argument
    229  free_percpu(entry->comp_vect_affinity);  in node_affinity_destroy()
    230  kfree(entry);  in node_affinity_destroy()
    236  struct hfi1_affinity_node *entry;  in node_affinity_destroy_all() local
    240  entry = list_entry(pos, struct hfi1_affinity_node,  in node_affinity_destroy_all()
    243  node_affinity_destroy(entry);  in node_affinity_destroy_all()
    251  struct hfi1_affinity_node *entry;  in node_affinity_allocate() local
    253  entry = kzalloc(sizeof(*entry), GFP_KERNEL);  in node_affinity_allocate()
    254  if (!entry)  in node_affinity_allocate()
    256  entry->node = node;  in node_affinity_allocate()
    [all …]

/drivers/soc/qcom/

smp2p.c
    175  struct smp2p_entry *entry;  in qcom_smp2p_intr() local
    202  list_for_each_entry(entry, &smp2p->inbound, node) {  in qcom_smp2p_intr()
    204  if (!strcmp(buf, entry->name)) {  in qcom_smp2p_intr()
    205  entry->value = &in->entries[i].value;  in qcom_smp2p_intr()
    213  list_for_each_entry(entry, &smp2p->inbound, node) {  in qcom_smp2p_intr()
    215  if (!entry->value)  in qcom_smp2p_intr()
    218  val = readl(entry->value);  in qcom_smp2p_intr()
    220  status = val ^ entry->last_value;  in qcom_smp2p_intr()
    221  entry->last_value = val;  in qcom_smp2p_intr()
    227  for_each_set_bit(i, entry->irq_enabled, 32) {  in qcom_smp2p_intr()
    [all …]
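The interrupt-handler hits show the core of SMP2P change detection: each inbound entry caches the last value it saw, XORs it against the freshly read word to get the bits that flipped, and then scans only the enabled bits. A stripped-down sketch of that step for a single entry; handle_bit() is a hypothetical stand-in for the nested interrupt delivery the real driver performs, and locking, edge polarity handling and the entry lookup are left out:

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/io.h>

struct smp2p_entry_sketch {
    u32 __iomem *value;         /* points into the shared inbound item */
    u32 last_value;             /* value seen on the previous interrupt */
    DECLARE_BITMAP(irq_enabled, 32);
};

static void smp2p_scan_entry_sketch(struct smp2p_entry_sketch *entry,
                                    void (*handle_bit)(int bit, bool level))
{
    u32 val, status;
    int i;

    val = readl(entry->value);

    /* Bits that changed since the last interrupt. */
    status = val ^ entry->last_value;
    entry->last_value = val;

    for_each_set_bit(i, entry->irq_enabled, 32) {
        if (!(status & BIT(i)))
            continue;           /* enabled but unchanged */
        handle_bit(i, val & BIT(i));
    }
}
```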

/drivers/parisc/

pdc_stable.c
     98  ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
     99  ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
    139  pdcspath_fetch(struct pdcspath_entry *entry)  in pdcspath_fetch() argument
    143  if (!entry)  in pdcspath_fetch()
    146  devpath = &entry->devpath;  in pdcspath_fetch()
    149  entry, devpath, entry->addr);  in pdcspath_fetch()
    152  if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)  in pdcspath_fetch()
    158  entry->dev = hwpath_to_device((struct hardware_path *)devpath);  in pdcspath_fetch()
    160  entry->ready = 1;  in pdcspath_fetch()
    162  DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);  in pdcspath_fetch()
    [all …]