/drivers/iio/buffer/ |
D | industrialio-buffer-dma.c |
      323  INIT_LIST_HEAD(&queue->incoming);  in iio_dma_buffer_request_update()
      349  list_add_tail(&block->head, &queue->incoming);  in iio_dma_buffer_request_update()
      408  list_for_each_entry_safe(block, _block, &queue->incoming, head) {  in iio_dma_buffer_enable()
      451  list_add_tail(&block->head, &queue->incoming);  in iio_dma_buffer_enqueue()
      619  INIT_LIST_HEAD(&queue->incoming);  in iio_dma_buffer_init()
      651  INIT_LIST_HEAD(&queue->incoming);  in iio_dma_buffer_exit()
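The matches above trace the whole lifecycle of the buffer's incoming list: INIT_LIST_HEAD() sets it up (and re-initialises it on update and exit), list_add_tail() queues blocks, and the _safe iterator drains it so entries can be unlinked mid-walk. A minimal sketch of that pattern, with illustrative dma_queue/dma_block types rather than the driver's real ones:

    #include <linux/list.h>

    struct dma_block {
        struct list_head head;          /* links the block into ->incoming */
    };

    struct dma_queue {
        struct list_head incoming;      /* blocks waiting for submission */
    };

    static void queue_init(struct dma_queue *queue)
    {
        INIT_LIST_HEAD(&queue->incoming);       /* as in iio_dma_buffer_init() */
    }

    static void queue_enqueue(struct dma_queue *queue, struct dma_block *block)
    {
        list_add_tail(&block->head, &queue->incoming);  /* as in iio_dma_buffer_enqueue() */
    }

    static void queue_enable(struct dma_queue *queue)
    {
        struct dma_block *block, *_block;

        /* _safe iterator: each block is unlinked while walking,
         * mirroring the loop in iio_dma_buffer_enable() */
        list_for_each_entry_safe(block, _block, &queue->incoming, head)
            list_del(&block->head);     /* then hand the block to hardware */
    }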
|
/drivers/block/drbd/ |
D | drbd_req.c |
      1436  static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)  in submit_fast_path() argument
      1442  list_for_each_entry_safe(req, tmp, incoming, tl_requests) {  in submit_fast_path()
      1463  struct list_head *incoming,  in prepare_al_transaction_nonblock() argument
      1472  while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {  in prepare_al_transaction_nonblock()
      1508  LIST_HEAD(incoming); /* from drbd_make_request() */  in do_submit()
      1514  list_splice_tail_init(&device->submit.writes, &incoming);  in do_submit()
      1521  list_splice_init(&busy, &incoming);  in do_submit()
      1522  submit_fast_path(device, &incoming);  in do_submit()
      1523  if (list_empty(&incoming))  in do_submit()
      1529  list_splice_init(&busy, &incoming);  in do_submit()
      [all …]
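do_submit() shows the classic splice idiom: requests accumulate on device->submit.writes under a lock, then list_splice_tail_init() steals the whole batch in O(1) onto a stack-local LIST_HEAD(incoming) that can be drained without the lock held. A minimal sketch of that idiom, assuming an illustrative request type and lock rather than drbd's actual fields:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct request_ctx {
        struct list_head tl_requests;
    };

    static void drain_writes(spinlock_t *lock, struct list_head *writes)
    {
        LIST_HEAD(incoming);            /* local batch, like in do_submit() */
        struct request_ctx *req, *tmp;

        spin_lock_irq(lock);
        /* move everything queued so far; leaves *writes empty */
        list_splice_tail_init(writes, &incoming);
        spin_unlock_irq(lock);

        /* drain the batch without holding the producer's lock */
        list_for_each_entry_safe(req, tmp, &incoming, tl_requests) {
            list_del_init(&req->tl_requests);
            /* ... submit req ... */
        }
    }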
|
/drivers/md/ |
D | dm-ps-service-time.c |
      215  size_t incoming)  in st_compare_load() argument
      253  sz1 += incoming;  in st_compare_load()
      254  sz2 += incoming;  in st_compare_load()
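These matches mean the path selector adds the size of the incoming I/O to both candidates' in-flight byte counts before comparing them, so the choice reflects the state after the dispatch rather than before it. A simplified version of that comparison, with the real function's overflow handling omitted and illustrative field names:

    #include <linux/types.h>

    struct path_info {
        sector_t in_flight_size;        /* bytes already queued on the path */
        unsigned int relative_throughput;
    };

    /* <0: prefer p1, >0: prefer p2, 0: tie */
    static int compare_load(struct path_info *p1, struct path_info *p2,
                            size_t incoming)
    {
        sector_t sz1 = p1->in_flight_size + incoming;
        sector_t sz2 = p2->in_flight_size + incoming;

        /* compare sz1/tp1 vs sz2/tp2 by cross-multiplying, so a path
         * with higher throughput tolerates proportionally more load */
        if (sz1 * p2->relative_throughput < sz2 * p1->relative_throughput)
            return -1;
        if (sz1 * p2->relative_throughput > sz2 * p1->relative_throughput)
            return 1;
        return 0;
    }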
|
D | Kconfig |
      479  the path expected to complete the incoming I/O in the shortest
      489  the path expected to complete the incoming I/O in the shortest
|
/drivers/greybus/ |
D | connection.c |
      635  bool incoming;  in gb_connection_flush_incoming_operations() local
      638  incoming = false;  in gb_connection_flush_incoming_operations()
      643  incoming = true;  in gb_connection_flush_incoming_operations()
      648  if (!incoming)  in gb_connection_flush_incoming_operations()
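The local incoming flag implements a scan-and-retry flush: each pass over the connection's operations clears the flag, sets it if any incoming operation is still in flight, and the loop exits only once a full pass finds none. A sketch of that shape, with a placeholder operation type and a list deletion standing in for the operation actually completing:

    #include <linux/list.h>

    struct operation {
        struct list_head links;
        bool is_incoming;       /* placeholder for the real test */
    };

    static void flush_incoming(struct list_head *operations)
    {
        struct operation *op;
        bool incoming;

        for (;;) {
            incoming = false;
            list_for_each_entry(op, operations, links) {
                if (op->is_incoming) {
                    incoming = true;
                    break;
                }
            }
            if (!incoming)
                break;          /* a clean pass: flush is done */
            /* cancel op and wait; deletion stands in for completion */
            list_del_init(&op->links);
        }
    }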
|
/drivers/gpu/drm/i915/gt/uc/ |
D | intel_guc_ct.h | 82 struct list_head incoming; /* incoming requests */ member
|
D | intel_guc_ct.c |
      110   INIT_LIST_HEAD(&ct->requests.incoming);  in intel_guc_ct_init_early()
      1061  request = list_first_entry_or_null(&ct->requests.incoming,  in ct_process_incoming_requests()
      1065  done = !!list_empty(&ct->requests.incoming);  in ct_process_incoming_requests()
      1112  list_add_tail(&request->link, &ct->requests.incoming);  in ct_handle_event()
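Together with the incoming member in the header above, these matches show a producer/consumer split: ct_handle_event() appends requests, and the processing side pops one entry per pass with list_first_entry_or_null(), reporting via list_empty() whether it should run again. A sketch of the consumer half under illustrative types, not the driver's actual code:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct ct_req {
        struct list_head link;
    };

    /* returns true once the incoming list has drained */
    static bool process_one(spinlock_t *lock, struct list_head *incoming)
    {
        struct ct_req *request;
        bool done;

        spin_lock_irq(lock);
        request = list_first_entry_or_null(incoming, struct ct_req, link);
        if (request)
            list_del(&request->link);
        done = !!list_empty(incoming);
        spin_unlock_irq(lock);

        if (request) {
            /* ... handle the request payload, unlocked ... */
        }
        return done;    /* caller reschedules itself until true */
    }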
|
/drivers/virt/ |
D | Kconfig | 37 2) A file interface to reading incoming doorbells.
|
/drivers/net/arcnet/ |
D | arcdevice.h | 311 struct Incoming incoming[256]; /* one from each address */ member
|
D | rfc1201.c | 140 struct Incoming *in = &lp->rfc1201.incoming[saddr]; in rx()
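rx() indexes the per-device array declared in arcdevice.h by the sender's address, giving every possible 8-bit ARCnet source its own reassembly slot so interleaved fragment streams from different senders never collide. A sketch of that lookup; the slot's contents here are illustrative, not the header's exact fields:

    #include <linux/skbuff.h>

    struct Incoming {
        struct sk_buff *skb;    /* partially reassembled packet */
        __be16 sequence;
        u8 lastpacket, numpackets;
    };

    struct rfc1201_state {
        struct Incoming incoming[256];  /* one from each address */
    };

    static struct Incoming *slot_for(struct rfc1201_state *st, u8 saddr)
    {
        return &st->incoming[saddr];    /* as in rx() */
    }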
|
/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/ |
D | gpc.fuc |
      338  // incoming fifo command?
      342  // queue incoming fifo command for later processing
|
D | hub.fuc |
      322  // incoming fifo command?
      326  // queue incoming fifo command for later processing
|
/drivers/pci/controller/ |
D | pci-hyperv.c |
      346  struct pci_incoming_message incoming;  member
      352  struct pci_incoming_message incoming;  member
      452  struct pci_incoming_message incoming;  member
      458  struct pci_incoming_message incoming;  member
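All four hits embed the same header type as the first member of a larger message struct, the usual C idiom for letting a dispatcher read the common header first and then cast the buffer to the full type once it knows what arrived. A sketch with invented field names (the real pci_incoming_message differs):

    #include <linux/types.h>

    struct pci_incoming_message {
        u32 message_type;       /* invented; stands in for the real header */
    };

    struct bus_relations_msg {
        struct pci_incoming_message incoming;   /* must stay first */
        u32 device_count;
    };

    static u32 peek_type(void *buffer)
    {
        /* safe because ->incoming is the first member */
        return ((struct pci_incoming_message *)buffer)->message_type;
    }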
|
/drivers/infiniband/ulp/rtrs/ |
D | README | 35 When processing an incoming write or read request, rtrs client uses memory
|
/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/ |
D | g98.fuc0s | 202 // incoming fifo command.
|
/drivers/scsi/aic7xxx/ |
D | aic7xxx.reg |
      1560  * incoming command queue.
      1580  * incoming target mode command descriptors. The
|
D | aic79xx.reg |
      3888  * incoming target mode command descriptors. The
      3904  * incoming command queue.
|
D | aic7xxx.seq | 2337 * Restore an SCB that failed to match an incoming reselection
|
D | aic79xx.seq | 662 * care of any incoming L_Qs.
|
/drivers/iommu/ |
D | Kconfig | 350 incoming transactions from devices that are not attached to
|
/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/ |
D | com.fuc | 285 // read incoming fifo command
|
/drivers/hid/ |
D | Kconfig | 579 generic USB_HID driver and all incoming events will be multiplexed
|