/drivers/acpi/

D  nvs.c
     96  struct nvs_page *entry, *next;    in suspend_nvs_register() local
    104  entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);    in suspend_nvs_register()
    105  if (!entry)    in suspend_nvs_register()
    108  list_add_tail(&entry->node, &nvs_list);    in suspend_nvs_register()
    109  entry->phys_start = start;    in suspend_nvs_register()
    111  entry->size = (size < nr_bytes) ? size : nr_bytes;    in suspend_nvs_register()
    113  start += entry->size;    in suspend_nvs_register()
    114  size -= entry->size;    in suspend_nvs_register()
    119  list_for_each_entry_safe(entry, next, &nvs_list, node) {    in suspend_nvs_register()
    120  list_del(&entry->node);    in suspend_nvs_register()
    [all …]

D  pci_irq.c
    124  static void do_prt_fixups(struct acpi_prt_entry *entry,    in do_prt_fixups() argument
    135  entry->id.segment == quirk->segment &&    in do_prt_fixups()
    136  entry->id.bus == quirk->bus &&    in do_prt_fixups()
    137  entry->id.device == quirk->device &&    in do_prt_fixups()
    138  entry->pin == quirk->pin &&    in do_prt_fixups()
    144  entry->id.segment, entry->id.bus,    in do_prt_fixups()
    145  entry->id.device, pin_name(entry->pin),    in do_prt_fixups()
    159  struct acpi_prt_entry *entry;    in acpi_pci_irq_check_entry() local
    165  entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);    in acpi_pci_irq_check_entry()
    166  if (!entry)    in acpi_pci_irq_check_entry()
    [all …]
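The nvs.c hits above trace a common build-a-list-then-unwind shape: one entry is allocated per region, queued with list_add_tail(), and everything is torn down with the _safe iterator if an allocation fails. A minimal kernel-style sketch of that pattern follows; my_range, my_range_list and the 4096-byte chunking are assumptions for the sketch, not the actual nvs.c code.

    /*
     * Illustrative only: sketch of the allocate/queue/unwind pattern.
     * Names and chunk size are made up, not taken from nvs.c.
     */
    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_range {
        struct list_head node;
        unsigned long phys_start;
        unsigned long size;
    };

    static LIST_HEAD(my_range_list);

    static int my_range_register(unsigned long start, unsigned long size)
    {
        struct my_range *entry, *next;

        while (size > 0) {
            entry = kzalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                goto error;

            /* queue first, so the error path can free it too */
            list_add_tail(&entry->node, &my_range_list);
            entry->phys_start = start;
            entry->size = size < 4096 ? size : 4096;

            start += entry->size;
            size -= entry->size;
        }
        return 0;

    error:
        /* the _safe iterator allows freeing the current node mid-walk */
        list_for_each_entry_safe(entry, next, &my_range_list, node) {
            list_del(&entry->node);
            kfree(entry);
        }
        return -ENOMEM;
    }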
/drivers/firmware/

D  memmap.c
     52  static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
     53  static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
     54  static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
     65  ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
    108  struct firmware_map_entry *entry = to_memmap_entry(kobj);    in release_firmware_map_entry() local
    110  if (PageReserved(virt_to_page(entry))) {    in release_firmware_map_entry()
    118  list_add(&entry->list, &map_entries_bootmem);    in release_firmware_map_entry()
    124  kfree(entry);    in release_firmware_map_entry()
    152  struct firmware_map_entry *entry)    in firmware_map_add_entry() argument
    156  entry->start = start;    in firmware_map_add_entry()
    [all …]

D  dmi-sysfs.c
     52  ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
     67  ssize_t (*show)(struct dmi_sysfs_entry *entry,
     99  struct dmi_sysfs_entry *entry = to_entry(kobj);    in dmi_sysfs_attr_show() local
    106  return attr->show(entry, buf);    in dmi_sysfs_attr_show()
    117  struct dmi_sysfs_entry *entry;    member
    128  struct dmi_sysfs_entry *entry = data->entry;    in find_dmi_entry_helper() local
    131  if (dh->type != entry->dh.type)    in find_dmi_entry_helper()
    147  data->ret = data->callback(entry, dh, data->private);    in find_dmi_entry_helper()
    157  static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,    in find_dmi_entry() argument
    161  .entry = entry,    in find_dmi_entry()
    [all …]
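Both firmware files above use the same sysfs idiom: a private attribute type whose show() takes the typed entry, and a dispatcher that recovers the entry from the kobject with container_of(). A hedged sketch of that dispatch pattern; my_entry, my_attr and start_show are invented names standing in for the drivers' own types.

    /*
     * Illustrative only: container_of()-based sysfs dispatch.
     * The real drivers wire my_sysfs_ops into a kobj_type.
     */
    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    struct my_entry {
        struct kobject kobj;
        u64 start;
    };

    struct my_attr {
        struct attribute attr;
        ssize_t (*show)(struct my_entry *entry, char *buf);
    };

    #define to_my_entry(x) container_of(x, struct my_entry, kobj)
    #define to_my_attr(x)  container_of(x, struct my_attr, attr)

    /* generic sysfs_ops hook: recover the typed objects, then delegate */
    static ssize_t my_attr_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
    {
        struct my_entry *entry = to_my_entry(kobj);
        struct my_attr *my = to_my_attr(attr);

        return my->show(entry, buf);
    }

    /* per-attribute show() that works on the typed entry directly */
    static ssize_t start_show(struct my_entry *entry, char *buf)
    {
        return sprintf(buf, "0x%llx\n", (unsigned long long)entry->start);
    }

    static struct my_attr start_attr = {
        .attr = { .name = "start", .mode = 0444 },
        .show = start_show,
    };

    static const struct sysfs_ops my_sysfs_ops = {
        .show = my_attr_show,
    };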
/drivers/gpu/drm/

D  drm_scatter.c
     50  static void drm_sg_cleanup(struct drm_sg_mem * entry)    in drm_sg_cleanup() argument
     55  for (i = 0; i < entry->pages; i++) {    in drm_sg_cleanup()
     56  page = entry->pagelist[i];    in drm_sg_cleanup()
     61  vfree(entry->virtual);    in drm_sg_cleanup()
     63  kfree(entry->busaddr);    in drm_sg_cleanup()
     64  kfree(entry->pagelist);    in drm_sg_cleanup()
     65  kfree(entry);    in drm_sg_cleanup()
     86  struct drm_sg_mem *entry;    in drm_legacy_sg_alloc() local
    100  entry = kzalloc(sizeof(*entry), GFP_KERNEL);    in drm_legacy_sg_alloc()
    101  if (!entry)    in drm_legacy_sg_alloc()
    [all …]

D  drm_agpsupport.c
    199  struct drm_agp_mem *entry;    in drm_agp_alloc() local
    206  if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))    in drm_agp_alloc()
    212  kfree(entry);    in drm_agp_alloc()
    216  entry->handle = (unsigned long)memory->key + 1;    in drm_agp_alloc()
    217  entry->memory = memory;    in drm_agp_alloc()
    218  entry->bound = 0;    in drm_agp_alloc()
    219  entry->pages = pages;    in drm_agp_alloc()
    220  list_add(&entry->head, &dev->agp->memory);    in drm_agp_alloc()
    222  request->handle = entry->handle;    in drm_agp_alloc()
    250  struct drm_agp_mem *entry;    in drm_agp_lookup_entry() local
    [all …]

D  drm_bufs.c
     44  struct drm_map_list *entry;    in drm_find_matching_map() local
     45  list_for_each_entry(entry, &dev->maplist, head) {    in drm_find_matching_map()
     54  if (!entry->map ||    in drm_find_matching_map()
     55  map->type != entry->map->type ||    in drm_find_matching_map()
     56  entry->master != dev->primary->master)    in drm_find_matching_map()
     62  return entry;    in drm_find_matching_map()
     65  if ((entry->map->offset & 0xffffffff) ==    in drm_find_matching_map()
     67  return entry;    in drm_find_matching_map()
     71  if (entry->map->offset == map->offset)    in drm_find_matching_map()
     72  return entry;    in drm_find_matching_map()
    [all …]
/drivers/misc/vmw_vmci/

D  vmci_queue_pair.c
    905  struct qp_entry *entry;    in qp_list_find() local
    910  list_for_each_entry(entry, &qp_list->head, list_item) {    in qp_list_find()
    911  if (vmci_handle_is_equal(entry->handle, handle))    in qp_list_find()
    912  return entry;    in qp_list_find()
    924  struct qp_guest_endpoint *entry;    in qp_guest_handle_to_entry() local
    927  entry = qp ? container_of(    in qp_guest_handle_to_entry()
    929  return entry;    in qp_guest_handle_to_entry()
    938  struct qp_broker_entry *entry;    in qp_broker_handle_to_entry() local
    941  entry = qp ? container_of(    in qp_broker_handle_to_entry()
    943  return entry;    in qp_broker_handle_to_entry()
    [all …]

D  vmci_doorbell.c
     99  struct dbell_entry *entry;    in vmci_dbell_get_priv_flags() local
    107  entry = container_of(resource, struct dbell_entry, resource);    in vmci_dbell_get_priv_flags()
    108  *priv_flags = entry->priv_flags;    in vmci_dbell_get_priv_flags()
    145  static void dbell_index_table_add(struct dbell_entry *entry)    in dbell_index_table_add() argument
    150  vmci_resource_get(&entry->resource);    in dbell_index_table_add()
    194  entry->idx = new_notify_idx;    in dbell_index_table_add()
    195  bucket = VMCI_DOORBELL_HASH(entry->idx);    in dbell_index_table_add()
    196  hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);    in dbell_index_table_add()
    205  static void dbell_index_table_remove(struct dbell_entry *entry)    in dbell_index_table_remove() argument
    209  hlist_del_init(&entry->node);    in dbell_index_table_remove()
    [all …]
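The vmci_doorbell.c hits show entries being filed into a hash table by index with hlist_add_head() and taken out with hlist_del_init(). A small illustrative sketch of that bucket handling; MY_HASH_BITS and struct my_dbell are assumptions, and the locking and resource refcounting the real driver does around these steps is omitted.

    /*
     * Illustrative only: hashed-bucket add/remove with hlist helpers.
     */
    #include <linux/hash.h>
    #include <linux/list.h>
    #include <linux/types.h>

    #define MY_HASH_BITS 6                          /* 64 buckets, arbitrary */

    static struct hlist_head my_table[1 << MY_HASH_BITS];

    struct my_dbell {
        struct hlist_node node;
        u32 idx;
    };

    static void my_table_add(struct my_dbell *entry)
    {
        u32 bucket = hash_32(entry->idx, MY_HASH_BITS);

        hlist_add_head(&entry->node, &my_table[bucket]);
    }

    static void my_table_remove(struct my_dbell *entry)
    {
        /* the _init variant leaves the node in a known unhashed state */
        hlist_del_init(&entry->node);
    }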
/drivers/gpu/drm/vmwgfx/

D  vmwgfx_cmdbuf_res.c
    106  struct vmw_cmdbuf_res *entry)    in vmw_cmdbuf_res_free() argument
    108  list_del(&entry->head);    in vmw_cmdbuf_res_free()
    109  WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));    in vmw_cmdbuf_res_free()
    110  vmw_resource_unreference(&entry->res);    in vmw_cmdbuf_res_free()
    111  kfree(entry);    in vmw_cmdbuf_res_free()
    126  struct vmw_cmdbuf_res *entry, *next;    in vmw_cmdbuf_res_commit() local
    128  list_for_each_entry_safe(entry, next, list, head) {    in vmw_cmdbuf_res_commit()
    129  list_del(&entry->head);    in vmw_cmdbuf_res_commit()
    130  if (entry->res->func->commit_notify)    in vmw_cmdbuf_res_commit()
    131  entry->res->func->commit_notify(entry->res,    in vmw_cmdbuf_res_commit()
    [all …]
/drivers/hid/

D  hid-lg4ff.c
    310  struct lg4ff_device_entry *entry = drv_data->device_props;    in lg4ff_adjust_input_event() local
    313  if (!entry) {    in lg4ff_adjust_input_event()
    318  switch (entry->wdata.product_id) {    in lg4ff_adjust_input_event()
    322  new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);    in lg4ff_adjust_input_event()
    364  struct lg4ff_device_entry *entry;    in lg4ff_play() local
    376  entry = drv_data->device_props;    in lg4ff_play()
    377  if (!entry) {    in lg4ff_play()
    381  value = entry->report->field[0]->value;    in lg4ff_play()
    390  spin_lock_irqsave(&entry->report_lock, flags);    in lg4ff_play()
    401  hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);    in lg4ff_play()
    [all …]
/drivers/isdn/mISDN/

D  dsp_pipeline.c
     82  struct dsp_element_entry *entry =    in mISDN_dsp_dev_release() local
     84  list_del(&entry->list);    in mISDN_dsp_dev_release()
     85  kfree(entry);    in mISDN_dsp_dev_release()
     90  struct dsp_element_entry *entry;    in mISDN_dsp_element_register() local
     96  entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);    in mISDN_dsp_element_register()
     97  if (!entry)    in mISDN_dsp_element_register()
    100  entry->elem = elem;    in mISDN_dsp_element_register()
    102  entry->dev.class = elements_class;    in mISDN_dsp_element_register()
    103  entry->dev.release = mISDN_dsp_dev_release;    in mISDN_dsp_element_register()
    104  dev_set_drvdata(&entry->dev, elem);    in mISDN_dsp_element_register()
    [all …]
/drivers/infiniband/hw/cxgb4/

D  resource.c
     84  u32 entry;    in c4iw_get_resource() local
     85  entry = c4iw_id_alloc(id_table);    in c4iw_get_resource()
     86  if (entry == (u32)(-1))    in c4iw_get_resource()
     88  return entry;    in c4iw_get_resource()
     91  void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)    in c4iw_put_resource() argument
     93  PDBG("%s entry 0x%x\n", __func__, entry);    in c4iw_put_resource()
     94  c4iw_id_free(id_table, entry);    in c4iw_put_resource()
     99  struct c4iw_qid_list *entry;    in c4iw_get_cqid() local
    105  entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,    in c4iw_get_cqid()
    106  entry);    in c4iw_get_cqid()
    [all …]
/drivers/pci/

D  msi.c
    110  struct msi_desc *entry;    in arch_setup_msi_irqs() local
    122  for_each_pci_msi_entry(entry, dev) {    in arch_setup_msi_irqs()
    123  ret = arch_setup_msi_irq(dev, entry);    in arch_setup_msi_irqs()
    140  struct msi_desc *entry;    in default_teardown_msi_irqs() local
    142  for_each_pci_msi_entry(entry, dev)    in default_teardown_msi_irqs()
    143  if (entry->irq)    in default_teardown_msi_irqs()
    144  for (i = 0; i < entry->nvec_used; i++)    in default_teardown_msi_irqs()
    145  arch_teardown_msi_irq(entry->irq + i);    in default_teardown_msi_irqs()
    155  struct msi_desc *entry;    in default_restore_msi_irq() local
    157  entry = NULL;    in default_restore_msi_irq()
    [all …]
/drivers/sh/intc/

D  virq.c
     27  #define for_each_virq(entry, head) \    argument
     28  for (entry = head; entry; entry = entry->next)
     86  struct intc_virq_list *entry;    in add_virq_to_pirq() local
     90  for_each_virq(entry, irq_get_handler_data(irq)) {    in add_virq_to_pirq()
     91  if (entry->irq == virq)    in add_virq_to_pirq()
     93  last = &entry->next;    in add_virq_to_pirq()
     96  entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);    in add_virq_to_pirq()
     97  if (!entry) {    in add_virq_to_pirq()
    102  entry->irq = virq;    in add_virq_to_pirq()
    105  *last = entry;    in add_virq_to_pirq()
    [all …]
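virq.c rolls its own singly linked chain: for_each_virq() walks the ->next pointers, and insertion keeps a pointer-to-pointer 'last' so the new node is appended without a special case for an empty list. A plain C sketch of that technique, using hypothetical names (my_node, my_node_append):

    /*
     * Illustrative only: hand-rolled singly linked chain with a
     * pointer-to-pointer tail link for appends.
     */
    #include <linux/stddef.h>

    struct my_node {
        unsigned int irq;
        struct my_node *next;
    };

    #define for_each_my_node(entry, head) \
        for (entry = head; entry; entry = entry->next)

    /* 'last' always points at the link to patch, so an empty chain
     * (where *head == NULL) needs no special handling */
    static void my_node_append(struct my_node **head, struct my_node *new)
    {
        struct my_node *entry;
        struct my_node **last = head;

        for_each_my_node(entry, *head)
            last = &entry->next;

        new->next = NULL;
        *last = new;
    }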
/drivers/parisc/

D  pdc_stable.c
    111  ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
    112  ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
    152  pdcspath_fetch(struct pdcspath_entry *entry)    in pdcspath_fetch() argument
    156  if (!entry)    in pdcspath_fetch()
    159  devpath = &entry->devpath;    in pdcspath_fetch()
    162  entry, devpath, entry->addr);    in pdcspath_fetch()
    165  if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)    in pdcspath_fetch()
    171  entry->dev = hwpath_to_device((struct hardware_path *)devpath);    in pdcspath_fetch()
    173  entry->ready = 1;    in pdcspath_fetch()
    175  DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);    in pdcspath_fetch()
    [all …]
/drivers/oprofile/

D  cpu_buffer.c
    142  *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)    in op_cpu_buffer_write_reserve() argument
    144  entry->event = ring_buffer_lock_reserve    in op_cpu_buffer_write_reserve()
    146  size * sizeof(entry->sample->data[0]));    in op_cpu_buffer_write_reserve()
    147  if (!entry->event)    in op_cpu_buffer_write_reserve()
    149  entry->sample = ring_buffer_event_data(entry->event);    in op_cpu_buffer_write_reserve()
    150  entry->size = size;    in op_cpu_buffer_write_reserve()
    151  entry->data = entry->sample->data;    in op_cpu_buffer_write_reserve()
    153  return entry->sample;    in op_cpu_buffer_write_reserve()
    156  int op_cpu_buffer_write_commit(struct op_entry *entry)    in op_cpu_buffer_write_commit() argument
    158  return ring_buffer_unlock_commit(op_ring_buffer, entry->event);    in op_cpu_buffer_write_commit()
    [all …]

D  cpu_buffer.h
     78  *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
     79  int op_cpu_buffer_write_commit(struct op_entry *entry);
     80  struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
     85  int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)    in op_cpu_buffer_add_data() argument
     87  if (!entry->size)    in op_cpu_buffer_add_data()
     89  *entry->data = val;    in op_cpu_buffer_add_data()
     90  entry->size--;    in op_cpu_buffer_add_data()
     91  entry->data++;    in op_cpu_buffer_add_data()
     92  return entry->size;    in op_cpu_buffer_add_data()
     97  int op_cpu_buffer_get_size(struct op_entry *entry)    in op_cpu_buffer_get_size() argument
    [all …]
/drivers/staging/lustre/lustre/llite/

D  statahead.c
     90  static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)    in ll_sa_entry_unhashed() argument
     92  return list_empty(&entry->se_hash);    in ll_sa_entry_unhashed()
     98  static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)    in ll_sa_entry_stated() argument
    101  return (entry->se_stat != SA_ENTRY_INIT);    in ll_sa_entry_stated()
    113  ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)    in ll_sa_entry_enhash() argument
    115  int i = ll_sa_entry_hash(entry->se_qstr.hash);    in ll_sa_entry_enhash()
    118  list_add_tail(&entry->se_hash, &sai->sai_cache[i]);    in ll_sa_entry_enhash()
    126  ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)    in ll_sa_entry_unhash() argument
    128  int i = ll_sa_entry_hash(entry->se_qstr.hash);    in ll_sa_entry_unhash()
    131  list_del_init(&entry->se_hash);    in ll_sa_entry_unhash()
    [all …]
/drivers/gpu/drm/ttm/

D  ttm_execbuf_util.c
     36  struct ttm_validate_buffer *entry)    in ttm_eu_backoff_reservation_reverse() argument
     38  list_for_each_entry_continue_reverse(entry, list, head) {    in ttm_eu_backoff_reservation_reverse()
     39  struct ttm_buffer_object *bo = entry->bo;    in ttm_eu_backoff_reservation_reverse()
     47  struct ttm_validate_buffer *entry;    in ttm_eu_del_from_lru_locked() local
     49  list_for_each_entry(entry, list, head) {    in ttm_eu_del_from_lru_locked()
     50  struct ttm_buffer_object *bo = entry->bo;    in ttm_eu_del_from_lru_locked()
     60  struct ttm_validate_buffer *entry;    in ttm_eu_backoff_reservation() local
     66  entry = list_first_entry(list, struct ttm_validate_buffer, head);    in ttm_eu_backoff_reservation()
     67  glob = entry->bo->glob;    in ttm_eu_backoff_reservation()
     70  list_for_each_entry(entry, list, head) {    in ttm_eu_backoff_reservation()
    [all …]
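The ttm_execbuf_util.c hits revolve around reserving a whole list of buffers and, if one reservation fails, releasing only the ones already taken by walking backwards from the failure point with list_for_each_entry_continue_reverse(). A simplified sketch of that unwind pattern; struct my_buf and the lock helpers are stand-ins, not the TTM API.

    /*
     * Illustrative only: lock forward, unwind in reverse from the
     * entry that failed.
     */
    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/types.h>

    struct my_buf {
        struct list_head head;
        bool locked;
    };

    static bool my_trylock(struct my_buf *buf)
    {
        buf->locked = true;     /* a real lock could fail; this sketch cannot */
        return true;
    }

    static void my_unlock(struct my_buf *buf)
    {
        buf->locked = false;
    }

    static int lock_all(struct list_head *list)
    {
        struct my_buf *entry;

        list_for_each_entry(entry, list, head) {
            if (!my_trylock(entry))
                goto err;
        }
        return 0;

    err:
        /* continue from the failing entry and walk backwards, releasing
         * only the buffers that were actually locked */
        list_for_each_entry_continue_reverse(entry, list, head)
            my_unlock(entry);
        return -EBUSY;
    }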
/drivers/firmware/efi/

D  runtime-map.c
     30  ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
     38  static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)    in type_show() argument
     40  return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);    in type_show()
     43  #define EFI_RUNTIME_FIELD(var) entry->md.var
     46  static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
     64  struct efi_runtime_map_entry *entry = to_map_entry(kobj);    in map_attr_show() local
     67  return map_attr->show(entry, buf);    in map_attr_show()
     94  struct efi_runtime_map_entry *entry;    in map_release() local
     96  entry = to_map_entry(kobj);    in map_release()
     97  kfree(entry);    in map_release()
    [all …]
/drivers/net/wireless/brcm80211/brcmfmac/

D  fwsignal.c
    743  struct brcmf_fws_mac_descriptor *entry;    in brcmf_fws_macdesc_lookup() local
    749  entry = &fws->desc.nodes[0];    in brcmf_fws_macdesc_lookup()
    751  if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))    in brcmf_fws_macdesc_lookup()
    752  return entry;    in brcmf_fws_macdesc_lookup()
    753  entry++;    in brcmf_fws_macdesc_lookup()
    762  struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;    in brcmf_fws_macdesc_find() local
    772  entry = ifp->fws_desc;    in brcmf_fws_macdesc_find()
    776  entry = brcmf_fws_macdesc_lookup(fws, da);    in brcmf_fws_macdesc_find()
    777  if (IS_ERR(entry))    in brcmf_fws_macdesc_find()
    778  entry = ifp->fws_desc;    in brcmf_fws_macdesc_find()
    [all …]
/drivers/net/wireless/rt2x00/

D  rt2x00usb.c
    218  static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)    in rt2x00usb_work_txdone_entry() argument
    228  if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))    in rt2x00usb_work_txdone_entry()
    229  rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);    in rt2x00usb_work_txdone_entry()
    231  rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);    in rt2x00usb_work_txdone_entry()
    239  struct queue_entry *entry;    in rt2x00usb_work_txdone() local
    243  entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);    in rt2x00usb_work_txdone()
    245  if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||    in rt2x00usb_work_txdone()
    246  !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))    in rt2x00usb_work_txdone()
    249  rt2x00usb_work_txdone_entry(entry);    in rt2x00usb_work_txdone()
    256  struct queue_entry *entry = (struct queue_entry *)urb->context;    in rt2x00usb_interrupt_txdone() local
    [all …]
/drivers/ntb/

D  ntb_transport.c
     99  struct list_head entry;    member
    115  unsigned int entry;    member
    195  struct list_head entry;    member
    201  struct list_head entry;    member
    301  list_add_tail(&nt->entry, &ntb_transport_list);    in ntb_bus_init()
    309  list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {    in ntb_bus_remove()
    312  list_del(&client_dev->entry);    in ntb_bus_remove()
    316  list_del(&nt->entry);    in ntb_bus_remove()
    338  list_for_each_entry(nt, &ntb_transport_list, entry)    in ntb_transport_unregister_client_dev()
    339  list_for_each_entry_safe(client, cd, &nt->client_devs, entry)    in ntb_transport_unregister_client_dev()
    [all …]
/drivers/net/ethernet/dec/tulip/

D  interrupt.c
     62  int entry;    in tulip_refill_rx() local
     67  entry = tp->dirty_rx % RX_RING_SIZE;    in tulip_refill_rx()
     68  if (tp->rx_buffers[entry].skb == NULL) {    in tulip_refill_rx()
     72  skb = tp->rx_buffers[entry].skb =    in tulip_refill_rx()
     81  tp->rx_buffers[entry].skb = NULL;    in tulip_refill_rx()
     85  tp->rx_buffers[entry].mapping = mapping;    in tulip_refill_rx()
     87  tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);    in tulip_refill_rx()
     90  tp->rx_ring[entry].status = cpu_to_le32(DescOwned);    in tulip_refill_rx()
    116  int entry = tp->cur_rx % RX_RING_SIZE;    in tulip_poll() local
    132  entry, tp->rx_ring[entry].status);    in tulip_poll()
    [all …]
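The tulip excerpts use free-running cur_rx/dirty_rx counters and map them onto ring slots with a modulo, so the producer/consumer distance is just a subtraction. A rough sketch of that indexing; RING_SIZE_EXAMPLE and struct rx_slot are assumptions, and the real driver allocates and DMA-maps an sk_buff where the comment sits.

    /*
     * Illustrative only: free-running counters mapped onto a ring by modulo.
     */
    #define RING_SIZE_EXAMPLE 128

    struct rx_slot {
        void *skb;              /* stand-in for the driver's sk_buff pointer */
    };

    static struct rx_slot ring[RING_SIZE_EXAMPLE];

    static void refill_ring(unsigned int *dirty_rx, unsigned int cur_rx)
    {
        /* dirty_rx trails cur_rx; the difference is the number of slots
         * handed back by the consumer that still need a fresh buffer */
        for (; cur_rx - *dirty_rx > 0; (*dirty_rx)++) {
            unsigned int entry = *dirty_rx % RING_SIZE_EXAMPLE;

            if (ring[entry].skb == NULL) {
                /* allocate and map a fresh receive buffer here */
            }
        }
    }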