/drivers/gpu/drm/i915/

D | intel_lrc.c
    820  #define wa_ctx_emit(batch, index, cmd) \                        argument
    826          batch[__index] = (cmd); \
    829  #define wa_ctx_emit_reg(batch, index, reg) \                    argument
    830          wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
    849          uint32_t *batch,                                        in gen8_emit_flush_coherentl3_wa() argument
    864          wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |  in gen8_emit_flush_coherentl3_wa()
    866          wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);          in gen8_emit_flush_coherentl3_wa()
    867          wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);  in gen8_emit_flush_coherentl3_wa()
    868          wa_ctx_emit(batch, index, 0);                           in gen8_emit_flush_coherentl3_wa()
    870          wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));     in gen8_emit_flush_coherentl3_wa()
    [all …]
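wa_ctx_emit() appends one dword to the workaround-context batch and post-increments the caller's index; the elided lines around 826 also bounds-check that index. A minimal userspace sketch of the same emit-macro pattern, assuming a fixed dword budget (the opcodes below are placeholders, not real GPU commands):

    #include <stdint.h>

    #define BATCH_DWORDS 1024   /* assumed bound; the kernel checks against the page size */

    /* Emit 'cmd' at the current index, advance the index, fail on overflow. */
    #define wa_ctx_emit(batch, index, cmd)                  \
            do {                                            \
                    uint32_t __index = (index)++;           \
                    if (__index >= BATCH_DWORDS)            \
                            return -1;                      \
                    (batch)[__index] = (cmd);               \
            } while (0)

    static int build_wa_batch(uint32_t *batch)
    {
            uint32_t index = 0;

            wa_ctx_emit(batch, index, 0x11000000u);
            wa_ctx_emit(batch, index, 0x0000ff00u);
            wa_ctx_emit(batch, index, 0u);
            return (int)index;      /* number of dwords emitted */
    }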
D | i915_gem_render_state.c
     64  #define OUT_BATCH(batch, i, val) \            argument
     70          (batch)[(i)++] = (val); \
     91          u32 s = rodata->batch[i];             in render_state_setup()
     98              rodata->batch[i + 1] != 0) {      in render_state_setup()
D | intel_renderstate.h
     31  const u32 *batch;                                  member
     38          .batch = gen ## _g ## _null_state_batch, \
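The initializer at line 38 selects a per-generation null-state table by pasting the generation number into the array name. A toy version of that token-pasting trick; the struct fields and array contents here are illustrative:

    #include <stdint.h>

    struct renderstate_rodata {
            const uint32_t *batch;
            uint32_t batch_items;
    };

    static const uint32_t gen6_null_state_batch[] = { 0x1, 0x2 };
    static const uint32_t gen7_null_state_batch[] = { 0x3, 0x4, 0x5 };

    /* gen ## _g ## _null_state_batch pastes to gen6_null_state_batch for _g = 6. */
    #define RO_RENDERSTATE(_g) {                                            \
            .batch = gen ## _g ## _null_state_batch,                        \
            .batch_items = sizeof(gen ## _g ## _null_state_batch) / sizeof(uint32_t), \
    }

    static const struct renderstate_rodata renderstates[] = {
            RO_RENDERSTATE(6),
            RO_RENDERSTATE(7),
    };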
D | i915_gem_execbuffer.c
     55  struct i915_vma *batch;                                        member
   1480          exec_start = params->batch->node.start +               in execbuf_submit()
   1484          exec_len = params->batch->size - params->args_batch_start_offset;  in execbuf_submit()
   1677          params->batch = eb_get_batch(eb);                      in i915_gem_do_execbuffer()
   1701          if (params->batch->obj->base.pending_write_domain) {   in i915_gem_do_execbuffer()
   1706          if (args->batch_start_offset > params->batch->size ||  in i915_gem_do_execbuffer()
   1707              args->batch_len > params->batch->size - args->batch_start_offset) {  in i915_gem_do_execbuffer()
   1718          params->batch->obj,                                    in i915_gem_do_execbuffer()
   1740          params->batch = vma;                                   in i915_gem_do_execbuffer()
   1744          params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;  in i915_gem_do_execbuffer()
   [all …]
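The check at lines 1706-1707 validates the user-supplied batch window without unsigned overflow: the start offset is tested first, and the length is compared against the remaining size rather than computing start + len. The idiom in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Validate that [start, start + len) lies inside an object of 'size' bytes.
     * Comparing 'len' against 'size - start' (after checking 'start') avoids
     * the wraparound that 'start + len > size' could hit with large inputs.
     */
    static bool batch_range_ok(uint64_t size, uint64_t start, uint64_t len)
    {
            if (start > size)
                    return false;
            return len <= size - start;
    }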
D | i915_gem_request.h
    128  struct i915_vma *batch;    member
D | i915_gem_request.c
    430          req->batch = NULL;    in i915_gem_request_alloc()
D | i915_gpu_error.c
   1178          request->batch);    in i915_gem_record_rings()
/drivers/gpu/drm/vmwgfx/

D | vmwgfx_mob.c
    238          struct vmw_otable_batch *batch)                             in vmw_otable_batch_setup() argument
    242          struct vmw_otable *otables = batch->otables;                in vmw_otable_batch_setup()
    247          for (i = 0; i < batch->num_otables; ++i) {                  in vmw_otable_batch_setup()
    260          &batch->otable_bo);                                         in vmw_otable_batch_setup()
    265          ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);  in vmw_otable_batch_setup()
    267          ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); in vmw_otable_batch_setup()
    270          ret = vmw_bo_map_dma(batch->otable_bo);                     in vmw_otable_batch_setup()
    274          ttm_bo_unreserve(batch->otable_bo);                         in vmw_otable_batch_setup()
    277          for (i = 0; i < batch->num_otables; ++i) {                  in vmw_otable_batch_setup()
    278          if (!batch->otables[i].enabled)                             in vmw_otable_batch_setup()
    [all …]
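vmw_otable_batch_setup() creates one buffer object for the whole otable batch, then reserves, populates, and DMA-maps it, unreserving and unwinding on failure. A compact sketch of that setup/unwind shape; the bo_* helpers are trivial stand-ins for the TTM calls, not the vmwgfx API:

    #include <stdlib.h>

    struct batch_bo { void *map; };

    /* Trivial stand-ins for the TTM buffer-object calls in the fragment. */
    static int bo_create(struct batch_bo **bo) { *bo = calloc(1, sizeof(**bo)); return *bo ? 0 : -1; }
    static int bo_reserve(struct batch_bo *bo) { (void)bo; return 0; }
    static int bo_populate(struct batch_bo *bo) { (void)bo; return 0; }
    static int bo_map_dma(struct batch_bo *bo) { bo->map = bo; return 0; }
    static void bo_unreserve(struct batch_bo *bo) { (void)bo; }
    static void bo_destroy(struct batch_bo *bo) { free(bo); }

    static int otable_batch_setup(struct batch_bo **out)
    {
            struct batch_bo *bo;
            int ret = bo_create(&bo);

            if (ret)
                    return ret;
            ret = bo_reserve(bo);
            if (ret)
                    goto out_destroy;
            ret = bo_populate(bo);
            if (!ret)
                    ret = bo_map_dma(bo);
            bo_unreserve(bo);       /* unreserve whether or not map/populate failed */
            if (ret)
                    goto out_destroy;
            *out = bo;
            return 0;

    out_destroy:
            bo_destroy(bo);
            return ret;
    }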
/drivers/xen/

D | gntdev.c
    759  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,  in gntdev_get_page() argument
    771          batch->pages[batch->nr_pages++] = page;        in gntdev_get_page()
    779  static void gntdev_put_pages(struct gntdev_copy_batch *batch)  in gntdev_put_pages() argument
    783          for (i = 0; i < batch->nr_pages; i++)          in gntdev_put_pages()
    784                  put_page(batch->pages[i]);             in gntdev_put_pages()
    785          batch->nr_pages = 0;                           in gntdev_put_pages()
    788  static int gntdev_copy(struct gntdev_copy_batch *batch)  in gntdev_copy() argument
    792          gnttab_batch_copy(batch->ops, batch->nr_ops);  in gntdev_copy()
    793          gntdev_put_pages(batch);                       in gntdev_copy()
    799          for (i = 0; i < batch->nr_ops; i++) {          in gntdev_copy()
    [all …]
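gntdev queues pinned pages and copy ops into a gntdev_copy_batch, flushes them all at once, and then inspects each op's status. A generic accumulate-and-flush sketch of that shape (process_ops() fakes the batched hypercall):

    #define BATCH_MAX 16

    struct op { int arg; int status; };

    struct copy_batch {
            struct op ops[BATCH_MAX];
            unsigned int nr_ops;
    };

    /* Stand-in for the batched hypercall: process every queued op at once. */
    static void process_ops(struct op *ops, unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++)
                    ops[i].status = 0;      /* pretend success */
    }

    static int batch_flush(struct copy_batch *b)
    {
            int ret = 0;

            process_ops(b->ops, b->nr_ops);
            for (unsigned int i = 0; i < b->nr_ops; i++)
                    if (b->ops[i].status != 0)
                            ret = -1;       /* report any per-op failure */
            b->nr_ops = 0;
            return ret;
    }

    static int batch_add(struct copy_batch *b, int arg)
    {
            /* Flush when full so there is always room for the new op. */
            if (b->nr_ops == BATCH_MAX && batch_flush(b))
                    return -1;
            b->ops[b->nr_ops++].arg = arg;
            return 0;
    }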
D | grant-table.c
    752  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)  in gnttab_batch_map() argument
    756          if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))  in gnttab_batch_map()
    758          for (op = batch; op < batch + count; op++)     in gnttab_batch_map()
    765  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)  in gnttab_batch_copy() argument
    769          if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))  in gnttab_batch_copy()
    771          for (op = batch; op < batch + count; op++)     in gnttab_batch_copy()
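gnttab_batch_copy() submits the whole array in one hypercall and then walks the ops; the kernel retries entries that report a transient failure. A sketch of that submit-then-retry-stragglers pattern, with an invented status scheme:

    #define ST_OK      0
    #define ST_EAGAIN  1

    struct copy_op { int status; };

    /* Stand-in for the batched hypercall; fakes a few transient failures. */
    static void submit_all(struct copy_op *batch, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++)
                    batch[i].status = (i % 3) ? ST_OK : ST_EAGAIN;
    }

    /* Resubmit just this op until it stops reporting "try again";
     * here the fake resubmission succeeds immediately. */
    static void retry_op(struct copy_op *op)
    {
            while (op->status == ST_EAGAIN)
                    op->status = ST_OK;
    }

    static void batch_copy(struct copy_op *batch, unsigned int count)
    {
            struct copy_op *op;

            submit_all(batch, count);
            for (op = batch; op < batch + count; op++)
                    if (op->status == ST_EAGAIN)
                            retry_op(op);
    }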
/drivers/staging/lustre/lnet/selftest/

D | console.c
   1143          struct lstcon_batch *batch;                   in lstcon_testrpc_condition() local
   1151          batch = test->tes_batch;                      in lstcon_testrpc_condition()
   1152          LASSERT(batch);                               in lstcon_testrpc_condition()
   1162          hash = batch->bat_cli_hash;                   in lstcon_testrpc_condition()
   1163          head = &batch->bat_cli_list;                  in lstcon_testrpc_condition()
   1168          hash = batch->bat_srv_hash;                   in lstcon_testrpc_condition()
   1169          head = &batch->bat_srv_list;                  in lstcon_testrpc_condition()
   1235  lstcon_verify_batch(const char *name, struct lstcon_batch **batch)  in lstcon_verify_batch() argument
   1239          rc = lstcon_batch_find(name, batch);          in lstcon_verify_batch()
   1245          if ((*batch)->bat_state != LST_BATCH_IDLE) {  in lstcon_verify_batch()
   [all …]
D | conrpc.c
    651          struct lstcon_batch *batch;                   in lstcon_batrpc_prep() local
    674          batch = (struct lstcon_batch *)tsb;           in lstcon_batrpc_prep()
    675          brq->bar_arg = batch->bat_arg;                in lstcon_batrpc_prep()
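The cast at line 674 is valid because struct lstcon_batch starts with the common test-session-block header, so a header pointer can be downcast to the containing batch. A minimal sketch of that first-member downcast (layouts are illustrative):

    #include <stdio.h>

    struct tsb_hdr { int tsb_id; };

    /* The header must be the FIRST member for the cast below to be valid. */
    struct batch {
            struct tsb_hdr bat_hdr;
            int bat_arg;
    };

    static int get_bat_arg(struct tsb_hdr *tsb)
    {
            struct batch *batch = (struct batch *)tsb;

            return batch->bat_arg;
    }

    int main(void)
    {
            struct batch b = { .bat_hdr = { 1 }, .bat_arg = 42 };

            printf("%d\n", get_bat_arg(&b.bat_hdr));
            return 0;
    }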
D | framework.c
    667          struct sfw_batch *batch;                      in sfw_destroy_session() local
    673          batch = list_entry(sn->sn_batches.next,       in sfw_destroy_session()
    675          list_del_init(&batch->bat_list);              in sfw_destroy_session()
    676          sfw_destroy_batch(batch);                     in sfw_destroy_session()
/drivers/char/

D | random.c
   2077          struct batched_entropy *batch;                 in get_random_long() local
   2082          batch = &get_cpu_var(batched_entropy_long);    in get_random_long()
   2083          if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {  in get_random_long()
   2084                  extract_crng((u8 *)batch->entropy_long);  in get_random_long()
   2085                  batch->position = 0;                   in get_random_long()
   2087          ret = batch->entropy_long[batch->position++];  in get_random_long()
   2103          struct batched_entropy *batch;                 in get_random_int() local
   2108          batch = &get_cpu_var(batched_entropy_int);     in get_random_int()
   2109          if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {  in get_random_int()
   2110                  extract_crng((u8 *)batch->entropy_int);  in get_random_int()
   [all …]
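get_random_long() amortizes CRNG extraction: a per-CPU array is refilled only when the read position wraps, and each call hands out the next element. A single-threaded sketch of the batch-and-refill idea; refill_block() is a non-cryptographic stand-in for extract_crng():

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_LONGS 8

    struct batched_entropy {
            uint64_t entropy[BATCH_LONGS];
            unsigned int position;
    };

    /* Stand-in for extract_crng(): refill the whole block in one call. */
    static void refill_block(uint64_t *block, unsigned int n)
    {
            static uint64_t seed = 0x9e3779b97f4a7c15ull;

            for (unsigned int i = 0; i < n; i++) {
                    seed ^= seed << 13; seed ^= seed >> 7; seed ^= seed << 17;
                    block[i] = seed;        /* xorshift: NOT crypto-grade */
            }
    }

    static uint64_t get_random_long_batched(void)
    {
            static struct batched_entropy batch;    /* per-CPU in the kernel */

            if (batch.position % BATCH_LONGS == 0) {
                    refill_block(batch.entropy, BATCH_LONGS);
                    batch.position = 0;
            }
            return batch.entropy[batch.position++];
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("%016llx\n", (unsigned long long)get_random_long_batched());
            return 0;
    }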
/drivers/misc/

D | vmw_balloon.c
    182  static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)  in vmballoon_batch_get_pa() argument
    184          return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;  in vmballoon_batch_get_pa()
    187  static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,  in vmballoon_batch_get_status() argument
    190          return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);  in vmballoon_batch_get_status()
    193  static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,  in vmballoon_batch_set_pa() argument
    196          batch->pages[idx] = pa;                        in vmballoon_batch_set_pa()
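Each slot in the balloon batch packs a page's physical address and a status code into one u64, separated by masks. A sketch with invented mask values (the real VMW_BALLOON_BATCH_* layout may differ):

    #include <stdint.h>

    /* Illustrative split: low 12 bits carry status, the rest the page address. */
    #define BATCH_STATUS_MASK  ((1ull << 12) - 1)
    #define BATCH_PAGE_MASK    (~BATCH_STATUS_MASK)

    struct batch_page { uint64_t pages[64]; };

    static uint64_t batch_get_pa(struct batch_page *b, int idx)
    {
            return b->pages[idx] & BATCH_PAGE_MASK;
    }

    static int batch_get_status(struct batch_page *b, int idx)
    {
            return (int)(b->pages[idx] & BATCH_STATUS_MASK);
    }

    static void batch_set_pa(struct batch_page *b, int idx, uint64_t pa)
    {
            b->pages[idx] = pa;     /* pa assumed page-aligned: status bits start zero */
    }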
/drivers/target/iscsi/

D | iscsi_target_erl1.c
   1072          int batch = 0;                                in iscsit_handle_ooo_cmdsn() local
   1080          batch = 1;                                    in iscsit_handle_ooo_cmdsn()
   1085          batch = 1;                                    in iscsit_handle_ooo_cmdsn()
   1093          ooo_cmdsn->batch_count = (batch) ?            in iscsit_handle_ooo_cmdsn()
/drivers/block/xen-blkback/

D | blkback.c
    783          unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);  in xen_blkbk_unmap() local
    785          invcount = xen_blkbk_unmap_prepare(ring, pages, batch,  in xen_blkbk_unmap()
    792          pages += batch;                               in xen_blkbk_unmap()
    793          num -= batch;                                 in xen_blkbk_unmap()
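xen_blkbk_unmap() walks the page list in chunks of at most BLKIF_MAX_SEGMENTS_PER_REQUEST, advancing the cursor and decrementing the remainder each pass. The chunking loop in isolation:

    #define MAX_SEGMENTS 11   /* stands in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

    /* Stand-in for the real per-chunk unmap work. */
    static void unmap_pages(int *pages, unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++)
                    pages[i] = 0;
    }

    static void unmap_all(int *pages, unsigned int num)
    {
            while (num) {
                    unsigned int batch = num < MAX_SEGMENTS ? num : MAX_SEGMENTS;

                    unmap_pages(pages, batch);
                    pages += batch;
                    num -= batch;
            }
    }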
/drivers/staging/android/ion/

D | ion_heap.c
    314          heap->shrinker.batch = 0;                     in ion_heap_init_shrinker()
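Several drivers in this listing (ion here, and dm-bufio, raid5, and bcache below) tune shrinker.batch, which caps how many objects the VM asks scan_objects() to reclaim per call; 0 selects the kernel default. A kernel-style sketch of wiring one up, assuming the 4.x-era field set, with skeletal callbacks:

    #include <linux/shrinker.h>

    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
            return 0;               /* report how many objects are freeable */
    }

    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
            return SHRINK_STOP;     /* free up to sc->nr_to_scan objects */
    }

    static struct shrinker demo_shrinker = {
            .count_objects = demo_count,
            .scan_objects  = demo_scan,
            .seeks         = DEFAULT_SEEKS,
            .batch         = 128,   /* like raid5 below; 0 would mean the default */
    };

    /* register_shrinker(&demo_shrinker) at init; unregister_shrinker() on teardown. */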
/drivers/staging/lustre/lustre/obdecho/

D | echo_client.c
   1335          u64 batch, struct obd_trans_info *oti,        in echo_client_prep_commit() argument
   1348          npages = batch >> PAGE_SHIFT;                 in echo_client_prep_commit()
/drivers/md/

D | raid5.c
   5790          struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;  in handle_active_stripes() local
   5796          batch[batch_size++] = sh;                     in handle_active_stripes()
   5822          handle_stripe(batch[i]);                      in handle_active_stripes()
   5829          hash = batch[i]->hash_lock_index;             in handle_active_stripes()
   5830          __release_stripe(conf, batch[i], &temp_inactive_list[hash]);  in handle_active_stripes()
   6660          conf->shrinker.batch = 128;                   in setup_conf()
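handle_active_stripes() collects up to MAX_STRIPE_BATCH stripes while holding the device lock, then runs handle_stripe() on each with the lock dropped, amortizing lock traffic over the batch. A pthread sketch of grab-batch-under-lock (the work list is simplified to an array stack, producer side omitted):

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_BATCH 8

    struct work { int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct work *pending[64];
    static size_t npending;

    static void handle_one(struct work *w) { (void)w; /* heavy work, no lock held */ }

    static size_t drain_batch(void)
    {
            struct work *batch[MAX_BATCH];
            size_t n = 0;

            pthread_mutex_lock(&lock);
            while (npending && n < MAX_BATCH)
                    batch[n++] = pending[--npending];
            pthread_mutex_unlock(&lock);

            for (size_t i = 0; i < n; i++)
                    handle_one(batch[i]);   /* lock dropped for the slow part */
            return n;
    }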
D | dm-bufio.c
   1706          c->shrinker.batch = 0;                        in dm_bufio_client_create()
/drivers/message/fusion/lsi/

D | mpi_history.txt
    230  * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
/drivers/md/bcache/

D | btree.c
    805          c->shrink.batch = c->btree_pages * 2;         in bch_btree_cache_alloc()
/drivers/scsi/aic7xxx/

D | aic7xxx.seq
   1078   * we can batch the clearing of HADDR with the fixup.
D | aic79xx.seq
    376   * order is preserved even if we batch.