Searched refs:pad_tensor (Results 1 – 4 of 4) sorted by relevance
/external/tensorflow/tensorflow/lite/tools/versioning/
  gpu_compatibility.cc
    835  auto& pad_tensor = op_sig.inputs.at(1);                                in CheckGpuDelegateCompatibility() local
    836  if (pad_tensor.dims.size() != 2) {                                     in CheckGpuDelegateCompatibility()
    839    pad_tensor.dims.size(), " dim"));                                    in CheckGpuDelegateCompatibility()
    841  bool supported = pad_tensor.dims[0] == 3 || pad_tensor.dims[0] == 4;   in CheckGpuDelegateCompatibility()
    842  if (!supported || pad_tensor.dims[1] != 2) {                           in CheckGpuDelegateCompatibility()
    845    pad_tensor.dims[0], "x", pad_tensor.dims[1]));                       in CheckGpuDelegateCompatibility()
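Taken together, these matches check one shape constraint: the GPU delegate accepts the op only when its second input (the paddings tensor) is 2-D with 3 or 4 rows and exactly 2 columns. A minimal Python re-expression of that predicate, for illustration only (the function name is hypothetical, not TFLite code):

    def paddings_shape_is_gpu_compatible(dims: list[int]) -> bool:
        """Mirror the dims check from CheckGpuDelegateCompatibility() above."""
        if len(dims) != 2:                 # paddings must be a 2-D tensor
            return False
        # one (before, after) pair per padded dimension: 3 or 4 rows, 2 columns
        return dims[0] in (3, 4) and dims[1] == 2

    assert paddings_shape_is_gpu_compatible([4, 2])      # e.g. NHWC-style paddings
    assert not paddings_shape_is_gpu_compatible([2, 2])  # too few rows
    assert not paddings_shape_is_gpu_compatible([4, 3])  # second dim must be 2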
/external/pytorch/torch/distributed/tensor/
  placement_types.py
     14  pad_tensor,
    123  shard = pad_tensor(shard, self.dim, pad_size)
    239  local_tensor = pad_tensor(local_tensor, self.dim, pad_size)
    303  local_tensor = pad_tensor(local_tensor, self.dim, old_dim_pad_size)
    311  local_tensor = pad_tensor(local_tensor, new_shard_dim, new_dim_pad_size)
  _collective_utils.py
    170  def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:   function
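The placement_types.py hits pad uneven local shards before collective ops by calling pad_tensor from _collective_utils.py. A minimal sketch of a helper with that signature, assuming it appends pad_size zeros along pad_dim (the real implementation may differ):

    import torch
    import torch.nn.functional as F

    def pad_tensor_sketch(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
        if pad_size == 0:
            return tensor
        # F.pad takes (before, after) pairs starting from the *last* dimension,
        # so build enough pairs to reach pad_dim and pad only its trailing side.
        pad = [0, 0] * (tensor.ndim - pad_dim)
        pad[-1] = pad_size
        return F.pad(tensor, pad)

    # Example: grow a 3-row shard to match 4-row shards before a collective.
    shard = torch.arange(6.0).reshape(3, 2)
    assert pad_tensor_sketch(shard, pad_dim=0, pad_size=1).shape == (4, 2)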
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/
  hlo-legalize-to-linalg.mlir
    2836  func.func @pad_tensor(%arg0: tensor<12x4xf32>, %arg1: tensor<f32>) -> tensor<18x12xf32> {
    2844  // CHECK-LABEL: func @pad_tensor
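This MLIR test pads a 12x4 tensor with a scalar value into an 18x12 result, i.e. 6 extra elements along dim 0 and 8 along dim 1 in total. The low/high split below is an assumption for illustration and is not taken from the test itself:

    import torch
    import torch.nn.functional as F

    x = torch.zeros(12, 4)
    low0, high0 = 4, 2   # assumed split of the 6 rows added along dim 0
    low1, high1 = 5, 3   # assumed split of the 8 columns added along dim 1
    # F.pad lists (before, after) pairs starting from the last dimension.
    padded = F.pad(x, (low1, high1, low0, high0), value=1.0)
    assert padded.shape == (18, 12)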