/external/mesa3d/src/compiler/nir/

nir_lower_ubo_vec4.c
  101: unsigned align_mul = nir_intrinsic_align_mul(intr);  [in nir_lower_ubo_vec4_lower(), local]
  110: align_mul = MIN2(align_mul, 16);  [in nir_lower_ubo_vec4_lower()]
  115: bool aligned_mul = (align_mul == 16 &&  [in nir_lower_ubo_vec4_lower()]
  143: } else if (align_mul == 8 &&  [in nir_lower_ubo_vec4_lower()]

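The hits above show the core trick of lowering UBO loads to vec4 slots: clamp whatever alignment is known to the 16-byte slot size, since anything stronger adds no information there. Both "&&" conditions are truncated in the listing, so the sketch below completes them with an assumed intra-slot check; the helper name and that check are illustrative, not Mesa's code.

    /* Standalone sketch of the clamp-to-a-vec4-slot pattern in the hits
     * above. The condition continuing after "&&" at line 115 is truncated
     * in the listing, so the intra-slot check here is an assumption. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    static bool access_is_vec4_aligned(unsigned align_mul, unsigned align_offset,
                                       unsigned num_bytes)
    {
       /* Alignment guarantees beyond one 16-byte vec4 slot add nothing. */
       align_mul = MIN2(align_mul, 16);

       /* Aligned iff a whole-slot multiple is guaranteed and the access
        * stays inside a single slot (assumed completion of the hit). */
       return align_mul == 16 && align_offset % 16 + num_bytes <= 16;
    }

    int main(void)
    {
       printf("%d\n", access_is_vec4_aligned(16, 0, 16)); /* 1: exactly one slot */
       printf("%d\n", access_is_vec4_aligned(16, 8, 12)); /* 0: crosses a slot   */
       printf("%d\n", access_is_vec4_aligned(8, 0, 4));   /* 0: slot not known   */
       return 0;
    }
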
nir_lower_io.c
  1200: uint32_t align_mul, uint32_t align_offset,  [in build_explicit_io_load(), argument]
  1210: align_mul, align_offset,  [in build_explicit_io_load()]
  1218: align_mul, align_offset,  [in build_explicit_io_load()]
  1224: align_mul, align_offset,  [in build_explicit_io_load()]
  1235: align_mul, align_offset,  [in build_explicit_io_load()]
  1242: align_mul, align_offset,  [in build_explicit_io_load()]
  1361: nir_intrinsic_set_align(load, align_mul, align_offset);  [in build_explicit_io_load()]
  1419: uint32_t align_mul, uint32_t align_offset,  [in build_explicit_io_store(), argument]
  1428: align_mul, align_offset,  [in build_explicit_io_store()]
  1435: align_mul, align_offset,  [in build_explicit_io_store()]
  [all …]

nir_lower_wrmasks.c
  133: unsigned align_mul = nir_intrinsic_align_mul(intr);  [in split_wrmask(), local]
  137: align_off = align_off % align_mul;  [in split_wrmask()]
  139: nir_intrinsic_set_align(new_intr, align_mul, align_off);  [in split_wrmask()]

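What these three hits suggest: when split_wrmask() breaks a partially-masked store into pieces, each piece keeps the original align_mul, but its align_offset must be folded back into [0, align_mul). A minimal sketch under that reading; the split_byte_offset parameter is an assumption, since the hits only show the final "% align_mul" reduction and the nir_intrinsic_set_align() call.

    /* Illustrative re-alignment for one piece of a split store: the
     * alignment multiple is unchanged, but the offset must be reduced
     * back into [0, align_mul) before it is set on the new intrinsic. */
    #include <stdio.h>

    static void piece_alignment(unsigned align_mul, unsigned align_offset,
                                unsigned split_byte_offset,
                                unsigned *piece_mul, unsigned *piece_off)
    {
       *piece_mul = align_mul;
       *piece_off = (align_offset + split_byte_offset) % align_mul;
    }

    int main(void)
    {
       unsigned mul, off;
       piece_alignment(16, 8, 12, &mul, &off);
       printf("align_mul=%u align_offset=%u\n", mul, off); /* 16 and 4 */
       return 0;
    }
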
nir_opt_load_store_vectorize.c
  176: uint32_t align_mul;  [member]
  544: uint32_t align_mul = 31;  [in calc_alignment(), local]
  547: align_mul = MIN2(align_mul, ffsll(entry->key->offset_defs_mul[i]));  [in calc_alignment()]
  550: entry->align_mul = 1u << (align_mul - 1);  [in calc_alignment()]
  552: if (!has_align || entry->align_mul >= nir_intrinsic_align_mul(entry->intrin)) {  [in calc_alignment()]
  553: entry->align_offset = entry->offset % entry->align_mul;  [in calc_alignment()]
  555: entry->align_mul = nir_intrinsic_align_mul(entry->intrin);  [in calc_alignment()]
  651: if (!ctx->callback(low->align_mul,  [in new_bitsize_acceptable()]
  794: first->align_mul = low->align_mul;  [in vectorize_loads()]
  880: second->align_mul = low->align_mul;  [in vectorize_stores()]
  [all …]

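The calc_alignment() hits are complete enough to reconstruct the arithmetic: for an offset of the form sum(def_i * mul_i) + const, with the def_i unknown, the provable alignment is limited by the smallest lowest-set-bit across the multipliers (capped at 1u << 30), and align_offset is the constant offset reduced modulo that. A standalone sketch, with the entry/intrinsic bookkeeping omitted:

    /* Standalone reconstruction of the arithmetic in the calc_alignment()
     * hits. Multipliers are assumed nonzero; the cap of 31 (so at most
     * 1u << 30) comes from the hits themselves. With no variable terms,
     * the cap itself is the guarantee. */
    #include <stdint.h>
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    static void calc_alignment(const uint64_t *offset_defs_mul, unsigned count,
                               uint64_t const_offset,
                               uint32_t *align_mul_out, uint32_t *align_offset_out)
    {
       uint32_t align_mul = 31;
       for (unsigned i = 0; i < count; i++) {
          /* __builtin_ffsll (GCC/Clang) mirrors the ffsll() call in hit 547. */
          align_mul = MIN2(align_mul, (uint32_t)__builtin_ffsll(offset_defs_mul[i]));
       }

       *align_mul_out = 1u << (align_mul - 1);
       *align_offset_out = const_offset % *align_mul_out;
    }

    int main(void)
    {
       /* offset = def0 * 8 + def1 * 12 + 20: every term is a multiple of 4. */
       const uint64_t muls[] = { 8, 12 };
       uint32_t mul, off;
       calc_alignment(muls, 2, 20, &mul, &off);
       printf("align_mul=%u align_offset=%u\n", mul, off); /* 4 and 0 */
       return 0;
    }

Hits 552-555 then appear to keep whichever is stronger, this computed alignment or the align_mul already recorded on the intrinsic.
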
nir_deref.c
  850: if (cast->cast.align_mul == 0)  [in opt_remove_restricting_cast_alignments()]
  874: if (parent_mul < cast->cast.align_mul)  [in opt_remove_restricting_cast_alignments()]
  904: assert(cast->cast.align_mul <= parent_mul);  [in opt_remove_restricting_cast_alignments()]
  905: if (parent_offset % cast->cast.align_mul != cast->cast.align_offset)  [in opt_remove_restricting_cast_alignments()]
  911: cast->cast.align_mul = 0;  [in opt_remove_restricting_cast_alignments()]
  1016: if (cast->cast.align_mul > 0)  [in opt_replace_struct_wrapper_cast()]
  1054: if (cast->cast.align_mul > 0)  [in opt_deref_cast()]
  1137: if (cast->cast.align_mul > 0)  [in is_vector_bitcast_deref()]

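The opt_remove_restricting_cast_alignments() hits outline a complete rule: a cast deref's explicit alignment can be dropped (align_mul set back to 0) when the parent already guarantees at least as much, i.e. the parent's (mul, offset) pair implies the cast's. A standalone sketch of that predicate; the function name and packaging are illustrative:

    /* Does the parent's alignment make the cast's alignment redundant?
     * Mirrors the comparisons in hits 850, 874, and 905. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool cast_align_is_redundant(unsigned parent_mul, unsigned parent_offset,
                                        unsigned cast_mul, unsigned cast_offset)
    {
       if (cast_mul == 0)
          return false; /* no alignment on the cast to begin with */
       if (parent_mul < cast_mul)
          return false; /* the cast genuinely adds information */
       /* parent_mul is a multiple of cast_mul (both are powers of two), so
        * the parent implies the cast iff the offsets agree mod cast_mul. */
       return parent_offset % cast_mul == cast_offset;
    }

    int main(void)
    {
       printf("%d\n", cast_align_is_redundant(16, 4, 4, 0)); /* 1: 4 % 4 == 0     */
       printf("%d\n", cast_align_is_redundant(16, 4, 8, 0)); /* 0: 4 % 8 != 0     */
       printf("%d\n", cast_align_is_redundant(4, 0, 16, 0)); /* 0: cast is stronger */
       return 0;
    }
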
nir.h
  1490: unsigned align_mul;  [member]
  2081: INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)  [in INTRINSIC_IDX_ACCESSORS()]
  2097: unsigned align_mul, unsigned align_offset)  [in INTRINSIC_IDX_ACCESSORS()]
  2099: assert(util_is_power_of_two_nonzero(align_mul));  [in INTRINSIC_IDX_ACCESSORS()]
  2100: assert(align_offset < align_mul);  [in INTRINSIC_IDX_ACCESSORS()]
  2101: nir_intrinsic_set_align_mul(intrin, align_mul);  [in INTRINSIC_IDX_ACCESSORS()]
  2115: const unsigned align_mul = nir_intrinsic_align_mul(intrin);  [in nir_intrinsic_align(), local]
  2117: assert(align_offset < align_mul);  [in nir_intrinsic_align()]
  2118: return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;  [in nir_intrinsic_align()]
  4569: uint32_t *align_mul,
  [all …]

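These nir.h hits expose the contract behind every other file in this listing: align_mul must be a nonzero power of two, align_offset must lie in [0, align_mul), and the single conservative alignment an access can claim is align_mul when align_offset is zero, otherwise the lowest set bit of align_offset. The intuition: every possible address has the form k * align_mul + align_offset, so the offset's lowest set bit caps the power of two dividing all of them. A standalone sketch mirroring the expressions at 2099-2100 and 2118; the helper names are hypothetical:

    #include <assert.h>
    #include <stdio.h>
    #include <strings.h> /* ffs(), as in hit 2118 */

    static int is_pow2_nonzero(unsigned v)
    {
       return v != 0 && (v & (v - 1)) == 0;
    }

    /* Mirrors the asserts shown for nir_intrinsic_set_align(). */
    static void check_align(unsigned align_mul, unsigned align_offset)
    {
       assert(is_pow2_nonzero(align_mul));
       assert(align_offset < align_mul);
    }

    /* Mirrors the return expression shown for nir_intrinsic_align(): the
     * largest power of two dividing every address of the form
     * k * align_mul + align_offset. */
    static unsigned effective_align(unsigned align_mul, unsigned align_offset)
    {
       check_align(align_mul, align_offset);
       return align_offset ? 1u << (ffs(align_offset) - 1) : align_mul;
    }

    int main(void)
    {
       printf("%u\n", effective_align(16, 0)); /* 16: offsets 0, 16, 32, ... */
       printf("%u\n", effective_align(16, 4)); /*  4: offsets 4, 20, 36, ... */
       printf("%u\n", effective_align(16, 6)); /*  2: offsets 6, 22, 38, ... */
       return 0;
    }
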
nir_opt_memcpy.c
  43: if (cast->cast.align_mul > 0)  [in opt_memcpy_deref_cast()]

nir_instr_set.c
  175: hash = HASH(hash, instr->cast.align_mul);  [in hash_deref()]
  629: deref1->cast.align_mul != deref2->cast.align_mul ||  [in nir_instrs_equal()]

nir_clone.c
  342: nderef->cast.align_mul = deref->cast.align_mul;  [in clone_deref_instr()]

nir_validate.c
  438: if (instr->cast.align_mul > 0) {  [in validate_deref_instr()]
  439: validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));  [in validate_deref_instr()]
  440: validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);  [in validate_deref_instr()]

nir_builder.h
  1237: uint32_t align_mul, uint32_t align_offset)  [in nir_alignment_deref_cast(), argument]
  1246: deref->cast.align_mul = align_mul;  [in nir_alignment_deref_cast()]

nir_serialize.c
  1043: blob_write_uint32(ctx->blob, deref->cast.align_mul);  [in write_deref()]
  1109: deref->cast.align_mul = blob_read_uint32(ctx->blob);  [in read_deref()]

nir_print.c
  750: instr->cast.align_mul, instr->cast.align_offset);  [in print_deref_instr()]

/external/mesa3d/src/freedreno/ir3/

ir3_nir.c
  149: ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,  [in ir3_nir_should_vectorize_mem(), argument]
  163: assert(util_is_power_of_two_nonzero(align_mul));  [in ir3_nir_should_vectorize_mem()]
  164: align_mul = MIN2(align_mul, 16);  [in ir3_nir_should_vectorize_mem()]
  168: if (align_mul < 4)  [in ir3_nir_should_vectorize_mem()]
  171: unsigned worst_start_offset = 16 - align_mul + align_offset;  [in ir3_nir_should_vectorize_mem()]

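Hit 171 is the informative one: given offset % align_mul == align_offset and align_mul clamped to 16, the furthest an access can begin within a 16-byte chunk is 16 - align_mul + align_offset. A standalone sketch of that bound; how ir3 compares it against the access size afterwards is not visible in the hits:

    /* Worst-case start position of an access inside a 16-byte chunk, per
     * the clamp at hit 164 and the bound at hit 171. Offsets take the
     * values align_offset, align_offset + align_mul, ...; the largest
     * value mod 16 sits just below the next 16-byte boundary. */
    #include <assert.h>
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    static unsigned worst_start_offset(unsigned align_mul, unsigned align_offset)
    {
       align_mul = MIN2(align_mul, 16);
       assert(align_offset < align_mul);
       return 16 - align_mul + align_offset;
    }

    int main(void)
    {
       printf("%u\n", worst_start_offset(16, 0)); /*  0: always vec4-aligned */
       printf("%u\n", worst_start_offset(8, 4));  /* 12: e.g. offset 28      */
       printf("%u\n", worst_start_offset(4, 0));  /* 12: e.g. offset 12      */
       return 0;
    }
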
/external/mesa3d/src/intel/compiler/

brw_nir_lower_mem_access_bit_sizes.c
  167: const unsigned align_mul = nir_intrinsic_align_mul(intrin);  [in lower_mem_store_bit_size(), local]
  208: (align_mul >= 4 && (align_offset + start) % 4 == 0) ||  [in lower_mem_store_bit_size()]

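Hit 208 encodes the dword-alignment test for a sub-store that begins `start` bytes into the original store: with at least a 4-byte multiple guaranteed, the test reduces to a compile-time modulo. A sketch; the "||" continuation at line 208 is truncated in the listing and not reproduced:

    /* With align_mul >= 4, every address agrees with align_offset mod 4,
     * so (align_offset + start) % 4 decides dword alignment statically. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool substore_is_dword_aligned(unsigned align_mul, unsigned align_offset,
                                          unsigned start)
    {
       return align_mul >= 4 && (align_offset + start) % 4 == 0;
    }

    int main(void)
    {
       printf("%d\n", substore_is_dword_aligned(16, 0, 8)); /* 1                  */
       printf("%d\n", substore_is_dword_aligned(16, 2, 2)); /* 1: 2 + 2 == 4      */
       printf("%d\n", substore_is_dword_aligned(2, 0, 4));  /* 0: only 2-byte mul */
       return 0;
    }
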
brw_nir.c
  923: brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,  [in brw_nir_should_vectorize_mem(), argument]
  947: align = align_mul;  [in brw_nir_should_vectorize_mem()]

/external/mesa3d/src/compiler/glsl/

gl_nir_lower_buffers.c
  219: cast->cast.align_mul = NIR_ALIGN_MUL_MAX;  [in lower_buffer_interface_derefs_impl()]

/external/mesa3d/src/intel/vulkan/

anv_nir_apply_pipeline_layout.c
  567: cast->cast.align_mul = ANV_UBO_ALIGNMENT;  [in lower_load_vulkan_descriptor()]
  573: cast->cast.align_mul = ANV_SSBO_ALIGNMENT;  [in lower_load_vulkan_descriptor()]

/external/mesa3d/src/amd/compiler/

aco_instruction_selection.cpp
  3019: unsigned align_mul = 0;  [member]
  3052: const unsigned align_mul = info.align_mul ? info.align_mul : component_size;  [in emit_load(), local]
  3053: unsigned align_offset = (info.align_offset + const_offset) % align_mul;  [in emit_load()]
  3062: byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;  [in emit_load()]
  3067: (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||  [in emit_load()]
  3074: bytes_needed += byte_align == -1 ? 4 - info.align_mul : byte_align;  [in emit_load()]
  3087: bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4);  [in emit_load()]
  3133: unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;  [in emit_load()]
  3191: align_offset = (align_offset + info.component_stride) % align_mul;  [in emit_load()]
  3194: align_offset = (align_offset + val.bytes()) % align_mul;  [in emit_load()]
  [all …]

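Among these hits, 3062 captures a reusable fact: when align_mul is a multiple of 4, the byte position of the access within a dword is a compile-time constant (align_offset % 4); otherwise it is unknown, which emit_load() marks as -1 for dynamic handling (hit 3087). A standalone sketch:

    /* byte_align per hit 3062: align_mul % 4 == 0 means
     * offset % 4 == align_offset % 4 for every possible offset, so the
     * dword-internal byte is known at compile time; -1 means it is not. */
    #include <stdio.h>

    static int byte_align(unsigned align_mul, unsigned align_offset)
    {
       return align_mul % 4 == 0 ? (int)(align_offset % 4) : -1;
    }

    int main(void)
    {
       printf("%d\n", byte_align(8, 6)); /*  2: always 2 bytes into a dword */
       printf("%d\n", byte_align(2, 0)); /* -1: byte within dword unknown   */
       return 0;
    }
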
/external/mesa3d/src/compiler/nir/tests/

load_store_vectorizer_tests.cpp
  73: static bool mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
  367: unsigned align_mul, unsigned align_offset, unsigned bit_size,  [in mem_vectorize_callback(), argument]

/external/mesa3d/src/amd/vulkan/

radv_pipeline.c
  2932: mem_vectorize_callback(unsigned align_mul, unsigned align_offset,  [in mem_vectorize_callback(), argument]
  2948: align = align_mul;  [in mem_vectorize_callback()]

/external/mesa3d/docs/relnotes/

20.3.0.rst
  1562: - nir: Document a bit about how align_mul/offset work.
  1565: - nir/gl_nir_lower_buffers: Set up align_mul/offset on UBOs.
  1566: - nir: Make the load_store_vectorizer provide align_mul + align_offset.