/external/pigweed/pw_console/py/pw_console/ |
D | web_kernel.py |
    180  kernel_params: dict[str, Any],
    185  self.kernel_params = kernel_params
    187  if kernel_params.get('global_vars', None) is None:
    188  self.kernel_params['global_vars'] = {}
    189  if kernel_params.get('local_vars', None) is None:
    190  self.kernel_params['local_vars'] = {}
    202  if kernel_params.get('sentence_completions'):
    204  kernel_params.get('sentence_completions', {})
    384  for logger_name in self.kernel_params['loggers'].keys():
    385  for logger in self.kernel_params['loggers'][logger_name]:
    [all …]
|
D | web_server.py |
    62   kernel_params: dict[str, Any] | None = None,
    65   handler = WebHandler(html_files=html_files, kernel_params=kernel_params)
    90   kernel_params: dict[str, Any] | None = None,
    96   self.kernel_params: dict[str, Any] = {}
    97   if kernel_params:
    98   self.kernel_params = kernel_params
    162  ws, self.kernel_params, self.web_socket_streaming_responder_loop
|
D | web.py | 71 kernel_params={
|
/external/pytorch/aten/src/ATen/native/transformers/ |
D | sdp_utils_cpp.cpp |
    60  SDPBackend select_sdp_backend_cpp(sdp_params const& kernel_params) { in select_sdp_backend_cpp() argument
    69  const auto ordering = priority_order_cpp(kernel_params); in select_sdp_backend_cpp()
    77  if (use_flash_attention_cpp(kernel_params, print_debug)) { in select_sdp_backend_cpp()
    99  use_flash_attention_cpp(kernel_params, print_debug); in select_sdp_backend_cpp()
|
D | sdp_utils_cpp.h | 54 SDPBackend select_sdp_backend_cpp(sdp_params const& kernel_params);
|
D | attention.cpp |
    428  sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal}; in _fused_sdp_choice_cpp() local
    429  auto backend = sdp::select_sdp_backend_cpp(kernel_params); in _fused_sdp_choice_cpp()
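The two attention.cpp lines above are the whole call-site pattern: bundle the attention inputs into a single sdp_params value and let select_sdp_backend_cpp pick the kernel. A minimal sketch of that pattern, assuming the internal ATen header path below and the field order shown on line 428; the optional-mask type and exact signatures may differ across PyTorch versions.

#include <ATen/ATen.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>

// Hedged sketch, not the actual _fused_sdp_choice_cpp implementation.
int64_t choose_cpu_sdp_backend(const at::Tensor& query,
                               const at::Tensor& key,
                               const at::Tensor& value,
                               const std::optional<at::Tensor>& attn_mask,
                               double dropout_p,
                               bool is_causal) {
  // Everything the per-backend checks need travels in one struct.
  sdp::sdp_params kernel_params{query, key, value, attn_mask, dropout_p, is_causal};
  // Walks the CPU priority order and returns the first backend whose
  // constraints pass (e.g. use_flash_attention_cpp), else the math fallback.
  auto backend = sdp::select_sdp_backend_cpp(kernel_params);
  return static_cast<int64_t>(backend);
}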
|
/external/executorch/backends/vulkan/runtime/graph/ops/impl/ |
D | Pool.cpp |
    87   Kernel2dParams kernel_params = create_kernel2d_params( in add_max_pool2d_node() local
    107  graph.create_params_buffer(kernel_params), in add_max_pool2d_node()
    162  Kernel2dParams kernel_params = in add_avg_pool2d_node() local
    179  graph.create_params_buffer(kernel_params), in add_avg_pool2d_node()
|
D | Convolution.cpp |
    351  Kernel2dParams kernel_params = create_kernel2d_params( in add_conv2d_node() local
    359  create_conv2d_params(graph, weight_data, kernel_params, transposed_val); in add_conv2d_node()
    363  check_conv2d_params(kernel_params, transposed_val); in add_conv2d_node()
    385  graph.create_params_buffer(kernel_params), in add_conv2d_node()
    452  Kernel1dParams kernel_params = { in add_conv1d_node() local
    482  graph.create_params_buffer(kernel_params), in add_conv1d_node()
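Pool.cpp and Convolution.cpp repeat one idea: gather the 2D kernel hyperparameters into a Kernel2dParams value, then hand it to graph.create_params_buffer() so the compute shader reads them as a parameters buffer. The following is a self-contained, purely illustrative analog of that idea; the struct layout and helper are hypothetical stand-ins, not the ExecuTorch Vulkan API.

#include <array>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in for Kernel2dParams: plain old data the shader
// can consume directly (kernel size, stride, padding, dilation).
struct Kernel2dParamsSketch {
  std::array<int32_t, 2> kernel_size;
  std::array<int32_t, 2> stride;
  std::array<int32_t, 2> padding;
  std::array<int32_t, 2> dilation;
};

// Hypothetical stand-in for graph.create_params_buffer(kernel_params):
// copy the struct bytes into a buffer that would back a uniform block.
std::vector<uint8_t> create_params_buffer_sketch(const Kernel2dParamsSketch& p) {
  std::vector<uint8_t> buffer(sizeof(p));
  std::memcpy(buffer.data(), &p, sizeof(p));
  return buffer;
}

// Usage: a 3x3 kernel with stride 2, padding 1, dilation 1.
// auto params = create_params_buffer_sketch({{3, 3}, {2, 2}, {1, 1}, {1, 1}});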
|
/external/pytorch/aten/src/ATen/native/transformers/cuda/ |
D | sdp_utils.cpp |
    673  SDPBackend select_sdp_backend(sdp_params const& kernel_params) { in select_sdp_backend() argument
    684  const auto ordering = priority_order(kernel_params); in select_sdp_backend()
    692  if (sdp::can_use_cudnn_attention(kernel_params, print_debug)) { in select_sdp_backend()
    697  if (sdp::can_use_flash_attention(kernel_params, print_debug)) { in select_sdp_backend()
    702  if (sdp::can_use_mem_efficient_attention(kernel_params, print_debug)) { in select_sdp_backend()
    724  sdp::can_use_mem_efficient_attention(kernel_params, print_debug); in select_sdp_backend()
    726  sdp::can_use_flash_attention(kernel_params, print_debug); in select_sdp_backend()
    728  sdp::can_use_cudnn_attention(kernel_params, print_debug); in select_sdp_backend()
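The CUDA selector above mirrors the CPU one: priority_order() yields a candidate list, the first backend whose can_use_* check passes wins (lines 692-702), and if none pass the checks are re-run with debugging on (lines 724-728) so each backend can report why it rejected these kernel params. A hedged, self-contained sketch of just that control flow, with illustrative types standing in for the real sdp:: ones.

#include <functional>
#include <utility>
#include <vector>

// Illustrative names only; the real code uses sdp::SDPBackend and
// sdp::can_use_{cudnn,flash,mem_efficient}_attention(kernel_params, print_debug).
enum class BackendSketch { cudnn, flash, mem_efficient, math, error };
using CanUseFn = std::function<bool(bool print_debug)>;

BackendSketch select_backend_sketch(
    const std::vector<std::pair<BackendSketch, CanUseFn>>& ordering) {
  // First pass: quiet checks; the first viable backend in priority order wins.
  for (const auto& [backend, can_use] : ordering) {
    if (can_use(/*print_debug=*/false)) {
      return backend;
    }
  }
  // Nothing was usable: repeat the checks with print_debug=true so every
  // backend logs its rejection reason, then signal failure.
  for (const auto& [backend, can_use] : ordering) {
    can_use(/*print_debug=*/true);
  }
  return BackendSketch::error;
}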
|
D | sdp_utils.h | 11 SDPBackend select_sdp_backend(sdp_params const& kernel_params);
|
D | attention.cu |
    563  sdp::sdp_params kernel_params{q, k, v, mask, 0.0, false}; in native_multi_head_attention_cuda() local
    564  auto backend = select_sdp_backend(kernel_params); in native_multi_head_attention_cuda()
    567  …bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, fa… in native_multi_head_attention_cuda()
    831  sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal}; in _fused_sdp_choice_cuda() local
    832  auto backend = select_sdp_backend(kernel_params); in _fused_sdp_choice_cuda()
|
/external/tensorflow/tensorflow/compiler/mlir/tools/kernel_gen/transforms/ |
D | tf_kernel_to_llvm_pass.cc |
    202  auto kernel_params = generateParamsArray(launch_op, adaptor, rewriter); in matchAndRewrite() local
    235  adaptor.blockSizeY(), adaptor.blockSizeZ(), kernel_params}); in matchAndRewrite()
|
/external/tensorflow/tensorflow/compiler/xla/stream_executor/gpu/ |
D | gpu_driver.h | 270 GpuStreamHandle stream, void** kernel_params, void** extra);
|
/external/tensorflow/tensorflow/compiler/xla/stream_executor/cuda/ |
D | cuda_gpu_executor.cc |
    469  void** kernel_params = const_cast<void**>(args.argument_addresses().data()); in Launch() local
    475  kernel_params, nullptr /* = extra */); in Launch()
|
D | cuda_driver.cc |
    464  void** kernel_params, void** extra) { in LaunchKernel() argument
    473  kernel_params, extra), in LaunchKernel()
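cuda_gpu_executor.cc and cuda_driver.cc split one job: the executor flattens the kernel arguments' addresses into a void** array (line 469) and the driver wrapper forwards that array as the kernelParams argument of the public CUDA driver API launch call. A minimal sketch of that contract; the kernel signature and launch geometry here are placeholders.

#include <cuda.h>

// Sketch only: launch a kernel declared as (float* x, float* y, float a, int n).
CUresult launch_kernel_sketch(CUfunction kernel, CUstream stream,
                              CUdeviceptr x, CUdeviceptr y, float a, int n) {
  // kernel_params holds one pointer per kernel argument, in declaration order;
  // the driver reads the argument values through these pointers at launch time.
  void* kernel_params[] = {&x, &y, &a, &n};
  unsigned grid_x = static_cast<unsigned>((n + 255) / 256);
  return cuLaunchKernel(kernel,
                        grid_x, 1, 1,   // grid dimensions
                        256, 1, 1,      // block dimensions
                        0,              // dynamic shared memory bytes
                        stream,
                        kernel_params,
                        nullptr);       // extra, unused here
}

The ROCm wrapper listed below passes the same kernel_params/extra pair through to HIP's module-launch call (line 418), so the packing convention is identical on both stacks.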
|
/external/tensorflow/tensorflow/stream_executor/rocm/ |
D | rocm_driver.cc |
    409  GpuStreamHandle stream, void** kernel_params, void** extra) { in LaunchKernel() argument
    418  shared_mem_bytes, stream, kernel_params, extra), in LaunchKernel()
|
/external/pigweed/docs/ |
D | changelog.rst | 4623 * `Default WebHandler.kernel_params to an empty dictionary
|