
Searched refs:threads_per_block (Results 1 – 15 of 15) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/gpu/
launch_dimensions.cc:38 int64 threads_per_block = gpu_device_info.threads_per_block_limit; in ThreadsPerBlockLimit() local
39 if (threads_per_block <= 0) { in ThreadsPerBlockLimit()
47 threads_per_block = gpu_device_info.threads_per_warp; in ThreadsPerBlockLimit()
48 if (threads_per_block == 0) { in ThreadsPerBlockLimit()
50 threads_per_block = 32; in ThreadsPerBlockLimit()
53 return threads_per_block; in ThreadsPerBlockLimit()
79 int64 threads_per_block = ThreadsPerBlockLimit(gpu_device_info); in CalculateLaunchDimensions() local
84 threads_per_block = in CalculateLaunchDimensions()
85 RoundUpToNearest(threads_per_block / unroll_factor, int64{32}); in CalculateLaunchDimensions()
86 if (num_elements < threads_per_block) { in CalculateLaunchDimensions()
[all …]
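The launch_dimensions.cc hits above show how XLA picks a block size: start from the device's threads-per-block limit, fall back to the warp size (ultimately 32) when the device info is missing, and, when the kernel is unrolled, shrink the block while keeping it warp-aligned and no larger than the element count. A minimal C++ sketch of that pattern; GpuDeviceInfo and the helper names below are stand-ins, not XLA's real declarations.

#include <cstdint>

// Stand-in for XLA's GpuDeviceInfo; fields mirror the ones used above.
struct GpuDeviceInfo {
  int64_t threads_per_block_limit;  // <= 0 when unknown
  int64_t threads_per_warp;         // 0 when unknown
};

int64_t ThreadsPerBlockLimit(const GpuDeviceInfo& info) {
  int64_t threads_per_block = info.threads_per_block_limit;
  if (threads_per_block <= 0) {
    // Fall back to one warp per block; assume the classic warp size of 32
    // if even that is not reported.
    threads_per_block = info.threads_per_warp;
    if (threads_per_block == 0) threads_per_block = 32;
  }
  return threads_per_block;
}

// Round x up to the nearest multiple of `multiple`.
int64_t RoundUpToNearest(int64_t x, int64_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

// When each thread handles `unroll_factor` elements, shrink the block
// accordingly but keep it a multiple of 32, and never launch more
// threads than there are elements.
int64_t PickThreadsPerBlock(const GpuDeviceInfo& info, int64_t num_elements,
                            int64_t unroll_factor) {
  int64_t threads_per_block = ThreadsPerBlockLimit(info);
  threads_per_block =
      RoundUpToNearest(threads_per_block / unroll_factor, int64_t{32});
  if (num_elements < threads_per_block) threads_per_block = num_elements;
  return threads_per_block;
}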
ir_emitter_unnested.h:632 int threads_per_block);
639 int threads_per_block);
703 llvm::Value* EmitThreadId(int64 threads_per_block, llvm::Type* index_ty);
709 ThreadIdInfo EmitThreadIdInfo(int64 threads_per_block, llvm::Type* index_ty,
elemental_ir_emitter.cc:313 llvm::Value* threads_per_block = IntCast( in EmitThreadId() local
316 return NSWAdd(NSWMul(block_id, threads_per_block), thread_id_in_block); in EmitThreadId()
ir_emitter_unnested.cc:4321 int threads_per_block) { in EmitFullWarpShuffleDownLoopForAllReduces() argument
4326 partial_result_addresses[i], threads_per_block); in EmitFullWarpShuffleDownLoopForAllReduces()
4332 llvm::Value* partial_result_address, int threads_per_block) { in EmitFullWarpShuffleDownLoopForReduce() argument
4334 CHECK_EQ(threads_per_block % 32, 0); in EmitFullWarpShuffleDownLoopForReduce()
4669 llvm::Value* IrEmitterUnnested::EmitThreadId(int64 threads_per_block, in EmitThreadId() argument
4675 llvm_ir::AddRangeMetadata(0, threads_per_block, thread_id_raw); in EmitThreadId()
4681 int64 threads_per_block, llvm::Type* index_ty, int64 num_threads_x) { in EmitThreadIdInfo() argument
4685 llvm::Value* thread_id = EmitThreadId(threads_per_block, index_ty); in EmitThreadIdInfo()
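The elemental_ir_emitter.cc hit builds the linear thread id as block_id * threads_per_block + thread_id_in_block, and the ir_emitter_unnested.cc hits wrap a full-warp shuffle-down reduction that requires threads_per_block to be a multiple of 32. An illustrative CUDA sketch of both patterns (this is not XLA code; the kernel and buffer names are made up):

// Linear thread id plus a warp shuffle-down partial sum.
// threads_per_block (blockDim.x) is assumed to be a multiple of 32.
__global__ void LinearThreadIdAndWarpSum(const float* in, float* out, int n) {
  int threads_per_block = blockDim.x;
  int global_id = blockIdx.x * threads_per_block + threadIdx.x;

  float partial = (global_id < n) ? in[global_id] : 0.0f;

  // Full-warp shuffle-down loop: after 5 steps lane 0 holds the warp's sum.
  for (int offset = 16; offset > 0; offset /= 2) {
    partial += __shfl_down_sync(0xffffffff, partial, offset);
  }
  if ((threadIdx.x & 31) == 0) {
    atomicAdd(out, partial);
  }
}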
/external/tensorflow/tensorflow/core/kernels/image/
adjust_hue_op_gpu.cu.cc:35 const int threads_per_block = config.thread_per_block; in operator ()() local
37 (number_of_elements + threads_per_block - 1) / threads_per_block; in operator ()()
39 block_count, threads_per_block, 0, stream, in operator ()()
adjust_saturation_op_gpu.cu.cc:36 const int threads_per_block = config.thread_per_block; in operator ()() local
38 (number_of_elements + threads_per_block - 1) / threads_per_block; in operator ()()
40 block_count, threads_per_block, 0, stream, in operator ()()
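Both image kernels compute the block count with the standard ceiling division (number_of_elements + threads_per_block - 1) / threads_per_block before launching on the stream. A minimal CUDA sketch of that launch-shape calculation; the kernel, its body, and the 256-thread block are placeholders, not the TensorFlow code.

#include <cuda_runtime.h>

__global__ void AdjustValuesKernel(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;  // placeholder per-element work
}

// Ceiling division of the element count by the block size, then launch.
void LaunchAdjust(float* data, int number_of_elements, cudaStream_t stream) {
  const int threads_per_block = 256;  // stand-in for config.thread_per_block
  const int block_count =
      (number_of_elements + threads_per_block - 1) / threads_per_block;
  AdjustValuesKernel<<<block_count, threads_per_block, 0, stream>>>(
      data, number_of_elements);
}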
/external/tensorflow/tensorflow/stream_executor/
device_description.cc:155 int64 element_count, int64 *threads_per_block, in CalculateDimensionality() argument
157 *threads_per_block = device_description.threads_per_block_limit(); in CalculateDimensionality()
158 *block_count = port::MathUtil::CeilOfRatio(element_count, *threads_per_block); in CalculateDimensionality()
160 CHECK_LE(element_count, *threads_per_block); in CalculateDimensionality()
161 *threads_per_block = element_count; in CalculateDimensionality()
device_description.h:349 int64 element_count, int64 *threads_per_block,
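The StreamExecutor CalculateDimensionality hits show the same shape calculation exposed as an out-parameter API: take the device's thread limit, compute the block count by ceiling division, and when everything fits in one block shrink the block to exactly the element count. A sketch under those assumptions (the single-block condition is inferred from the snippet, and the limit is passed in directly rather than read from a DeviceDescription):

#include <cassert>
#include <cstdint>

void CalculateDimensionality(int64_t threads_per_block_limit,
                             int64_t element_count, int64_t* threads_per_block,
                             int64_t* block_count) {
  *threads_per_block = threads_per_block_limit;
  *block_count = (element_count + *threads_per_block - 1) / *threads_per_block;
  if (*block_count == 1) {
    // Single-block case: use exactly as many threads as there are elements.
    assert(element_count <= *threads_per_block);
    *threads_per_block = element_count;
  }
}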
/external/tensorflow/tensorflow/core/kernels/
reduction_gpu_kernels.cu.h:721 const int threads_per_block = 128;
722 const int warps_per_block = threads_per_block / TF_RED_WARPSIZE;
726 threads_per_block, 0, cu_stream, in, out,
862 int threads_per_block = 128;
863 int num_blocks = Eigen::divup(extent_y, threads_per_block);
866 num_blocks, threads_per_block, 0, cu_stream, in,
875 int threads_per_block = 128;
877 (extent_x * extent_z + threads_per_block - 1) / threads_per_block;
882 num_blocks, threads_per_block, 0, cu_stream, in,
890 int threads_per_block = 128;
[all …]
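The reduction kernels fix threads_per_block at 128, derive warps_per_block from the warp size (TF_RED_WARPSIZE, 32 on CUDA), and get block counts from ceiling division (Eigen::divup). A small C++ sketch of that arithmetic; the struct and function names are illustrative only.

constexpr int kWarpSize = 32;  // TF_RED_WARPSIZE on CUDA

inline int DivUp(int a, int b) { return (a + b - 1) / b; }

struct ReductionLaunchShape {
  int threads_per_block;
  int warps_per_block;
  int num_blocks;
};

ReductionLaunchShape RowReductionShape(int extent_y) {
  ReductionLaunchShape shape;
  shape.threads_per_block = 128;
  shape.warps_per_block = shape.threads_per_block / kWarpSize;  // 4 warps
  shape.num_blocks = DivUp(extent_y, shape.threads_per_block);
  return shape;
}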
/external/tensorflow/tensorflow/stream_executor/gpu/
redzone_allocator.cc:223 int64 threads_per_block = std::min( in RunRedzoneChecker() local
226 tensorflow::MathUtil::CeilOfRatio(num_elements, threads_per_block); in RunRedzoneChecker()
228 stream->ThenLaunch(ThreadDim(threads_per_block), BlockDim(block_count), in RunRedzoneChecker()
gpu_driver.h:569 GpuContext* context, GpuFunctionHandle kernel, int threads_per_block,
/external/mesa3d/src/gallium/drivers/radeonsi/
si_get.c:731 unsigned threads_per_block = get_max_threads_per_block(sscreen, ir_type); in si_get_compute_param() local
732 block_size[0] = threads_per_block; in si_get_compute_param()
733 block_size[1] = threads_per_block; in si_get_compute_param()
734 block_size[2] = threads_per_block; in si_get_compute_param()
/external/mesa3d/src/gallium/drivers/r600/
r600_pipe_common.c:958 unsigned threads_per_block = get_max_threads_per_block(rscreen, ir_type); in r600_get_compute_param() local
959 block_size[0] = threads_per_block; in r600_get_compute_param()
960 block_size[1] = threads_per_block; in r600_get_compute_param()
961 block_size[2] = threads_per_block; in r600_get_compute_param()
/external/tensorflow/tensorflow/stream_executor/cuda/
cuda_driver.cc:1624 GpuContext* context, CUfunction kernel, int threads_per_block, in GetMaxOccupiedBlocksPerCore() argument
1631 &max_blocks, kernel, threads_per_block, dynamic_shared_memory_bytes), in GetMaxOccupiedBlocksPerCore()
/external/tensorflow/tensorflow/stream_executor/rocm/
rocm_driver.cc:1308 GpuContext* context, hipFunction_t kernel, int threads_per_block, in GetMaxOccupiedBlocksPerCore() argument
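The CUDA and ROCm GetMaxOccupiedBlocksPerCore wrappers both ask the vendor occupancy query how many blocks of a given size can be resident on one multiprocessor for a given kernel and dynamic shared memory budget. A minimal sketch using the CUDA runtime occupancy API (the wrappers above use the driver API, cuOccupancyMaxActiveBlocksPerMultiprocessor, and its HIP equivalent; the kernel here is a placeholder):

#include <cuda_runtime.h>

__global__ void DummyKernel(float* data) { data[threadIdx.x] += 1.0f; }

// Query how many blocks of `threads_per_block` threads, each needing
// `dynamic_shared_memory_bytes` of dynamic shared memory, can be resident
// on a single multiprocessor for DummyKernel.
int MaxOccupiedBlocksPerCore(int threads_per_block,
                             size_t dynamic_shared_memory_bytes) {
  int max_blocks = 0;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &max_blocks, DummyKernel, threads_per_block,
      dynamic_shared_memory_bytes);
  return max_blocks;
}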