/external/pytorch/test/torch_np/numpy_tests/core/

test_einsum.py:
    29  # Setup for optimize einsum
    41  (TypeError, IndexError, ValueError), np.einsum, optimize=do_opt
    43  assert_raises((IndexError, ValueError), np.einsum, "", optimize=do_opt)
    46  assert_raises((AttributeError, TypeError), np.einsum, 0, 0, optimize=do_opt)
    49  assert_raises(TypeError, np.einsum, "", 0, out="test", optimize=do_opt)
    54  np.einsum,
    62  assert_raises(ValueError, np.einsum, "", 0, casting="blah", optimize=do_opt)
    66  TypeError, np.einsum, "", 0, dtype="bad_data_type", optimize=do_opt
    70  assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt)
    74  (RuntimeError, TypeError), np.einsum, *(None,) * 63, optimize=do_opt
    [all …]

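These tests exercise np.einsum's argument validation with and without path
optimization. A minimal sketch of the same failure modes, using pytest.raises
in place of numpy.testing's assert_raises (that substitution is my assumption;
the file itself uses assert_raises):

    import numpy as np
    import pytest

    for do_opt in (True, False):
        # No subscripts and no operands is rejected.
        with pytest.raises((TypeError, IndexError, ValueError)):
            np.einsum(optimize=do_opt)
        # `out` must be an ndarray, not an arbitrary object.
        with pytest.raises(TypeError):
            np.einsum("", 0, out="test", optimize=do_opt)
        # Unknown keyword arguments are rejected.
        with pytest.raises(TypeError):
            np.einsum("", 0, bad_arg=0, optimize=do_opt)
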
/external/pytorch/torch/backends/opt_einsum/

__init__.py:
     1  # mypy: allow-untyped-defs
    18  def is_available() -> bool:
    23  def get_opt_einsum() -> Any:
    28  def _set_enabled(_enabled: bool) -> None:
    32  "the benefits of calculating an optimal path for einsum. torch.einsum will "
    34  "calculation, please install opt-einsum."
    40  def _get_enabled() -> bool:
    44  def _set_strategy(_strategy: str) -> None:
    48  "torch.einsum will bypass path calculation and simply contract from left to right. "
    54  "torch.einsum will bypass path calculation and simply contract from left to right. "
    [all …]

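This module is the user-facing switch for opt-einsum path optimization. A short
usage sketch built from the accessors listed above plus the documented public
attributes (treat the exact strategy strings as an assumption on my part):

    import torch

    if torch.backends.opt_einsum.is_available():      # opt-einsum is importable
        torch.backends.opt_einsum.enabled = True       # compute a contraction path
        torch.backends.opt_einsum.strategy = "greedy"  # cheaper than "optimal"
    # Otherwise torch.einsum skips path calculation and, as the messages above
    # warn, simply contracts from left to right.

    a, b, c = (torch.randn(8, 8) for _ in range(3))
    out = torch.einsum("ij,jk,kl->il", a, b, c)
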
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/

batchmatmul_to_einsum.mlir:
     1  // RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s
     3  func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<…
     4  // CHECK-LABEL: test_batch_matmul_to_einsum
     5  …// CHECK: "tf.Einsum"(%arg0, %arg1) {equation = "...mk,...kn->...mn"} : (tensor<1x2x3xf32>, tensor…
     6  …, %arg1) {adj_x = false, adj_y = false} : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
    10  …tmul_broadcast_to_einsum(%arg0: tensor<2x2x4xf32>, %arg1: tensor<2x4x2xf32>) -> tensor<2x2x2xf32> {
    11  // CHECK-LABEL: test_batch_matmul_broadcast_to_einsum
    12  …// CHECK: "tf.Einsum"(%arg0, %arg1) {equation = "...mk,...kn->...mn"} : (tensor<2x2x4xf32>, tensor…
    13  …%arg1) {adj_x = false, adj_y = false} : (tensor<2x2x4xf32>, tensor<2x4x2xf32>) -> tensor<2x2x2xf32>
    17  …c_shape_both_arg_to_einsum(%arg0: tensor<1x2x?xf32>, %arg1: tensor<?x4xf32>) -> tensor<1x2x4xf32> {
    [all …]

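The pass rewrites tf.BatchMatMul as tf.Einsum with the equation
"...mk,...kn->...mn". A quick numpy check (mine, not from the test) that this
equation really is batched matmul over the leading dimensions:

    import numpy as np

    a = np.random.rand(2, 2, 4).astype(np.float32)
    b = np.random.rand(2, 4, 2).astype(np.float32)
    # "...mk,...kn->...mn" contracts k and broadcasts the batch dims.
    assert np.allclose(np.einsum("...mk,...kn->...mn", a, b), a @ b)
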
einsum.mlir:
     1  // RUN: tf-opt -split-input-file -verify-diagnostics -tf-einsum %s | FileCheck %s
     3  func.func @einsum_basic(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x6xf32>) -> tensor<3x4x6xf32> {
     4  …%0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "ijk,ikm->ijm"}: (tensor<3x4x5x…
     6  // CHECK-LABEL: einsum_basic
     7  …%arg1) {adj_x = false, adj_y = false} : (tensor<3x4x5xf32>, tensor<3x5x6xf32>) -> tensor<3x4x6xf32>
    10  func.func @einsum_matmul(%arg0: tensor<7x9xf32>, %arg1: tensor<9x5xf32>) -> tensor<7x5xf32> {
    11  …%0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "ae,ed->ad"}: (tensor<7x9xf32>,…
    13  // CHECK-LABEL: einsum_matmul
    14  …arg0, %arg1) {adj_x = false, adj_y = false} : (tensor<7x9xf32>, tensor<9x5xf32>) -> tensor<7x5xf32>
    18  func.func @einsum_broadcast(%arg0: tensor<3x4x5xf32>, %arg1: tensor<5x6xf32>) -> tensor<3x4x6xf32> {
    [all …]

/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/

hlo-legalize-einsum-to-dot-general.mlir:
     1  // RUN: mlir-hlo-opt -mhlo-legalize-einsum-to-dot-general %s -o - | FileCheck %s
     3  func.func @einsum_diag(%arg0: tensor<6x6xf32>) -> tensor<6xf32> {
     5  …%1 = "mhlo.einsum"(%0, %arg0) {einsum_config = ",ii->i"} : (tensor<f32>, tensor<6x6xf32>) -> tenso…
     8  // CHECK-LABEL: func @einsum_diag
     9  // NOTE: 2-operand diagonal not supported as a dot_general lowering.
    12  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
    14  // CHECK: "mhlo.einsum"
    16  …_high_rank_vector_mul(%arg0: tensor<8x2x6xf32>, %arg1: tensor<8x5x3x6xf32>) -> tensor<8x5x3x2xf32>…
    17  …%0 = "mhlo.einsum"(%arg0, %arg1) {einsum_config = "bxy,bijy->bijx"} : (tensor<8x2x6xf32>, tensor<8…
    20  // CHECK-LABEL: func @einsum_batched_matrix_high_rank_vector_mul
    [all …]

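The einsum_config ",ii->i" pairs a scalar with a square matrix and yields the
scaled matrix diagonal; per the NOTE above, that diagonal extraction has no
dot_general lowering, so the op stays as mhlo.einsum. A numpy sketch of the
same contraction:

    import numpy as np

    s = np.float32(2.0)                                # the tensor<f32> operand
    m = np.arange(36, dtype=np.float32).reshape(6, 6)  # the tensor<6x6xf32> operand
    assert np.allclose(np.einsum(",ii->i", s, m), s * np.diag(m))
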
hlo-legalize-to-linalg.mlir:
     1  // RUN: mlir-hlo-opt %s -hlo-legalize-to-linalg -split-input-file --canonicalize | FILECHECK_OPTS="…
     3  // CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
     4  // CHECK-LABEL: func @float_add
     6  %rhs: tensor<2x2xf32>) -> tensor<2x2xf32> {
     8  // CHECK-SAME: {someattr}
     9  // CHECK: ^{{[a-z0-9_]*}}
    10  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: f32
    11  // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: f32
    12  // CHECK: %[[RESULT:[a-zA-Z0-9_]*]] = arith.addf %[[ARG0]], %[[ARG1]]
    15  : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    [all …]

/external/tensorflow/tensorflow/compiler/mlir/hlo/tests/

hlo-legalize-einsum-to-dot-general.mlir:
     1  // RUN: mlir-hlo-opt -mhlo-legalize-einsum-to-dot-general %s -o - | FileCheck %s
     3  func @einsum_diag(%arg0: tensor<6x6xf32>) -> tensor<6xf32> {
     5  …%1 = "mhlo.einsum"(%0, %arg0) {einsum_config = ",ii->i"} : (tensor<f32>, tensor<6x6xf32>) -> tenso…
     8  // CHECK-LABEL: func @einsum_diag
     9  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
    12  // CHECK-SAME: dot_dimension_numbers = {
    13  // CHECK-SAME: lhs_batching_dimensions = dense<> : tensor<0xi64>,
    14  // CHECK-SAME: lhs_contracting_dimensions = dense<> : tensor<0xi64>,
    15  // CHECK-SAME: rhs_batching_dimensions = dense<> : tensor<0xi64>,
    16  // CHECK-SAME: rhs_contracting_dimensions = dense<> : tensor<0xi64>}
    [all …]

/external/pytorch/.github/requirements/

pip-requirements-macOS.txt:
     8  # Use numba-0.49.1 or older on Intel Macs, but 0.56.0 on M1 machines, as older numba is not availab…
    11  opt-einsum>=3.3
    13  nvidia-ml-py==11.525.84
    17  pytest-xdist==3.3.1
    18  pytest-rerunfailures==10.3
    19  pytest-flakefinder==1.1.0
    23  unittest-xml-reporting<=3.2.0,>=2.0.0
    26  pytest-cpp==2.3.0
    28  z3-solver==4.12.2.0

/external/pytorch/aten/src/ATen/native/

Linear.cpp:
    43  static int value = -1;  [in parseLinearFlatten3d()]
    44  if (value == -1) {  [in parseLinearFlatten3d()]
    59  // can't use -1 in reshape because it errors when a dimension is 0  [in _flatten_nd_linear()]
    61  for (int64_t i = 0, ndim = input_sizes.size(); i < ndim - 1; ++i) {  [in _flatten_nd_linear()]
    64  auto inp_reshape = input.reshape_symint({flattened_dim, input_sizes.at(input_sizes.size() -1)});  [in _flatten_nd_linear()]
    66  auto new_size = input_sizes.slice(0, input_sizes.size() - 1);  [in _flatten_nd_linear()]
    94  if (input_dim == 2 && bias->defined()) {  [in linear()]
    98  if (bias->defined() && !input.is_xla()) {  [in linear()]
   103  …ous() && input.layout() == c10::kStrided && weight.layout() == c10::kStrided && bias->dim() == 1) {  [in linear()]
   112  if (bias->defined()) {  [in linear()]
    [all …]

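_flatten_nd_linear collapses every dimension except the last, runs a 2-D
linear, and restores the leading shape. A Python sketch of that logic,
paraphrased from the C++ above (the explicit product mirrors the comment on
line 59: reshape(-1, k) errors out when a leading dimension is 0):

    import math
    import torch

    def flatten_nd_linear(input, weight, bias):
        sizes = input.shape
        # Explicit product instead of reshape(-1, k): -1 fails when a dim is 0.
        flattened_dim = math.prod(sizes[:-1])
        out2d = torch.nn.functional.linear(
            input.reshape(flattened_dim, sizes[-1]), weight, bias)
        return out2d.reshape(*sizes[:-1], out2d.shape[-1])

    x = torch.randn(0, 3, 5)              # works even with a zero-size dim
    y = flatten_nd_linear(x, torch.randn(4, 5), torch.randn(4))
    assert y.shape == (0, 3, 4)
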
/external/pytorch/.ci/docker/

requirements-ci.txt:
    29  #Description: dill extends pickle with serializing and de-serializing for most built-ins
    70  #mkl #this breaks linux-bionic-rocm4.5-py3.7
    77  #mkl-devel
    87  #MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8
   114  #Description: Just-In-Time Compiler for Numerical Functions
   120  #Description: Provides N-dimensional arrays and linear algebra
   137  opt-einsum==3.3
   138  #Description: Python library to optimize tensor contraction order, used in einsum
   175  pytest-xdist==3.3.1
   180  pytest-flakefinder==1.1.0
    [all …]

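The opt-einsum package pinned above can also be used directly: its contract()
is a drop-in for np.einsum that picks a contraction order first. A usage
sketch (mine, not taken from this repo):

    import numpy as np
    import opt_einsum as oe

    a, b, c = (np.random.rand(8, 8) for _ in range(3))
    path, info = oe.contract_path("ij,jk,kl->il", a, b, c)  # inspect chosen order
    result = oe.contract("ij,jk,kl->il", a, b, c)
    assert np.allclose(result, np.einsum("ij,jk,kl->il", a, b, c))
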
/external/pytorch/.github/workflows/

_docs.yml:
     6  build-environment:
     9  description: Top-level label for what's being built/tested.
    10  docker-image:
    19  run-doxygen:
    24  sync-tag:
    30  job with the same `sync-tag` is identical.
    31  s3-bucket:
    35  default: "gha-artifacts"
    36  aws-role-to-assume:
    41  upload-aws-role-to-assume:
    [all …]

_win-test.yml:
     1  name: win-test
     6  build-environment:
     9  description: Top-level label for what's being built/tested.
    10  cuda-version:
    14  test-matrix:
    18  sync-tag:
    24  job with the same `sync-tag` is identical.
    25  timeout-minutes:
    38  if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
    40  matrix: ${{ fromJSON(inputs.test-matrix) }}
    [all …]

_linux-test.yml:
     1  name: linux-test
     6  build-environment:
     9  description: Top-level label for what's being built/tested.
    10  test-matrix:
    14  docker-image:
    18  sync-tag:
    24  job with the same `sync-tag` is identical.
    25  timeout-minutes:
    31  use-gha:
    36  dashboard-tag:
    [all …]

/external/pytorch/.github/actions/linux-test/

action.yml:
     1  name: linux-test
     4  build-environment:
     7  description: Top-level label for what's being built/tested.
     8  test-matrix:
    12  docker-image:
    16  sync-tag:
    22  job with the same `sync-tag` is identical.
    23  use-gha:
    28  dashboard-tag:
    32  s3-bucket:
    [all …]

/external/pytorch/

setup.py:
     5  # build with -O0 and -g (debug symbols)
     8  # build with optimizations and -g (debug symbols)
    32  # build CUDA kernels with -lineinfo --source-in-ptx. Note that
    67  # MKL-DNN threading mode: TBB or OMP (default)
    70  # Prefer to link with MKL statically - Unix only
    90  # disables use of system-wide nccl (we will use our submoduled
   113  # specify the version of PyTorch, rather than the hard-coded version
   127  # specify a namespace for ONNX built here rather than the hard-coded
   147  # /usr/local/cuda-x.y
   178  # ATen parallel backend to use for intra- and inter-op parallelism
    [all …]

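setup.py steers the build almost entirely through environment variables like
the ones commented above. A minimal sketch of the pattern; the variable names
come from the comments, but the check_env_flag helper is written here for
illustration:

    import os

    def check_env_flag(name: str, default: str = "") -> bool:
        # Illustrative helper; setup.py has its own equivalent.
        return os.getenv(name, default).upper() in ("1", "ON", "YES", "TRUE", "Y")

    debug = check_env_flag("DEBUG")                          # -O0 plus debug symbols
    rel_with_deb_info = check_env_flag("REL_WITH_DEB_INFO")  # optimized, with -g
    build_version = os.getenv("PYTORCH_BUILD_VERSION")       # overrides hard-coded version
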
/external/pytorch/aten/src/ATen/

autocast_mode.h:
    25  // deprecated CUDA-specific autocast APIs
   207  // If nextArg is floating-point, compare its scalar_type with our
   258  // Template to catch non-Tensor args (no-op that returns current best guess)
   275  // promoted to float32. Non-Tensor arguments are ignored.
   340  // Template to catch non-Tensor args.
   389  // Policies correspond to op categories that need code-divergent handling.
   411  // and redispatch to the type-aware overload.
   420  https://stackoverflow.com/questions/46533698/how-to-deduce-argument-list-from-function-pointer)
   503  // If ineligible, calls F with unaltered args. Does not set opt dtype,
   504  // because setting opt dtype explicitly may interfere with internal
    [all …]

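This header implements the machinery behind torch.autocast: eligible ops are
redispatched in a lower or promoted dtype depending on their policy category,
while non-Tensor arguments pass through untouched. From the Python side it is
driven like this (standard torch API, shown for orientation only):

    import torch

    with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
        a = torch.randn(4, 4)
        b = torch.randn(4, 4)
        c = a @ b        # matmul is autocast-eligible, runs in bfloat16
    print(c.dtype)       # torch.bfloat16
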
/external/pytorch/c10/core/

DispatchKey.h:
    22  // customizable per-backend, then we also look at the lower BackendComponent
    30  // built-in keys)
    32  // If you add a new (non-privateuse) backend here,
    53  // WARNING! If we add a new per-backend functionality key that has higher
    79  // - dense kernels (e.g. DispatchKey::CPU)
    80  // - sparse kernels (e.g. DispatchKey::SparseCPU)
    81  // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
    82  // - autograd kernels (e.g. DispatchKey::AutogradCPU)
    86  // per-backend functionality]
   116  // (1) non-customizable backends (e.g. FPGA)
    [all …]

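Each backend gets its own copy of the per-backend functionality keys, so the
same Python call routes to different kernels depending on the tensor. A small
illustration (standard torch API; the key names in the comments match the
snippet above):

    import torch

    dense = torch.eye(3)
    sparse = dense.to_sparse()
    print((dense + dense).layout)    # torch.strided    -> dense kernel (CPU)
    print((sparse + sparse).layout)  # torch.sparse_coo -> sparse kernel (SparseCPU)
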
/external/tensorflow/tensorflow/compiler/mlir/quantization/tensorflow/tests/

prepare_lifting.mlir:
     7  // http://www.apache.org/licenses/LICENSE-2.0
    15  // RUN: tf-quant-opt %s -quant-prepare-lifting | FileCheck %s
    17  func.func @decompose_batch_norm(%arg0: tensor<*xf32>) -> (tensor<*xf32>) {
    18  %cst = "tf.Const"() {value = dense<1.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
    19  %cst_0 = "tf.Const"() {value = dense<0.500000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
    20  …-5 : f32, exponential_avg_factor = 1.000000e+00 : f32, is_training = false} : (tensor<*xf32>, tens…
    24  // CHECK-DAG: %[[CONST:.*]] = "tf.Const"() {value = dense<2.49743462E-5> : tensor<2xf32>} : () -> t…
    25  // CHECK-DAG: %[[CONST_0:.*]] = "tf.Const"() {value = dense<0.999950051> : tensor<2xf32>} : () -> t…
    26  // CHECK: %[[mul:.*]] = "tf.Mul"(%arg0, %[[CONST_0]]) : (tensor<*xf32>, tensor<2xf32>) -> tensor<*x…
    27  // CHECK: %[[add:.*]] = "tf.Add"(%[[mul]], %[[CONST]]) : (tensor<*xf32>, tensor<2xf32>) -> tensor<*…
    [all …]

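What the pass does algebraically: an inference-mode fused batch norm is an
affine function of x, so it folds into a single Mul and Add with precomputed
constants. A numpy sketch of that identity, with made-up statistics (the test
above checks the same structure with its own constants):

    import numpy as np

    x = np.random.rand(1, 4, 4, 2).astype(np.float32)
    gamma, beta = np.float32(1.0), np.float32(0.5)   # scale/offset, as in the test
    mean, var, eps = np.float32(0.2), np.float32(1.5), np.float32(1e-5)

    multiplier = gamma / np.sqrt(var + eps)   # becomes the folded tf.Mul constant
    addend = beta - mean * multiplier         # becomes the folded tf.Add constant
    bn = (x - mean) / np.sqrt(var + eps) * gamma + beta
    assert np.allclose(x * multiplier + addend, bn, atol=1e-6)
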
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/

tf_ops_a_m.cc:
     7  http://www.apache.org/licenses/LICENSE-2.0
    42  #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    43  #include "mlir/Dialect/Traits.h"  // from @llvm-project
    44  #include "mlir/IR/Attributes.h"  // from @llvm-project
    45  #include "mlir/IR/Builders.h"  // from @llvm-project
    46  #include "mlir/IR/BuiltinAttributes.h"  // from @llvm-project
    47  #include "mlir/IR/BuiltinOps.h"  // from @llvm-project
    48  #include "mlir/IR/BuiltinTypes.h"  // from @llvm-project
    49  #include "mlir/IR/Diagnostics.h"  // from @llvm-project
    50  #include "mlir/IR/DialectImplementation.h"  // from @llvm-project
    [all …]

/external/tensorflow/tensorflow/python/util/

dispatch.py:
     7  # http://www.apache.org/licenses/LICENSE-2.0
    15  """Type-based dispatch for TensorFlow's Python APIs.
    27  * The "type-based dispatch" system checks the types of the parameters passed
    34  dispatch system in favor of the type-based dispatch system, once all users have
    47  ### Type-based Dispatch
    49  The main interface for the type-based dispatch system is the `dispatch_for_api`
    58  visible ops by default. APIs/ops that are implemented in Python can opt in to
    64  import typing  # pylint: disable=unused-import (used in doctests)
   103  def handle(self, args, kwargs):  # pylint: disable=unused-argument
   200  # pylint: disable=g-doc-return-or-yield
    [all …]

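The `dispatch_for_api` decorator mentioned in the docstring is exposed as
tf.experimental.dispatch_for_api. A sketch of opting a custom type into an
existing op, following the documented pattern (the MaskedTensor type here is
illustrative, not from this file):

    import tensorflow as tf

    class MaskedTensor(tf.experimental.ExtensionType):
        values: tf.Tensor
        mask: tf.Tensor        # bool, True where `values` is valid

    @tf.experimental.dispatch_for_api(tf.math.add)
    def masked_add(x: MaskedTensor, y: MaskedTensor):
        # Called whenever tf.math.add receives two MaskedTensors.
        return MaskedTensor(x.values + y.values, x.mask & y.mask)
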
/external/tensorflow/tensorflow/compiler/mlir/xla/tests/translate/

export.mlir:
     1  // RUN: tf-mlir-translate -split-input-file -mlir-hlo-to-hlo-text %s | FileCheck %s
     2  // RUN: tf-mlir-translate -split-input-file -mlir-hlo-to-hlo-text --via-builder=true %s | FileCheck…
     5  func.func @main(%arg0: tensor<2xi1>) -> tensor<2xi1> {
     6  %0 = "mhlo.add"(%arg0, %arg0) : (tensor<2xi1>, tensor<2xi1>) -> tensor<2xi1>
    14  // -----
    17  func.func @main(%arg0: !mhlo.token, %arg1: !mhlo.token) -> !mhlo.token {
    18  %0 = "mhlo.after_all"(%arg0, %arg1) : (!mhlo.token, !mhlo.token) -> !mhlo.token
    25  // CHECK: ROOT %[[RESULT:.*]] = token[] after-all(token[] %[[ARG0]], token[] %[[ARG1]])
    27  // -----
    30  func.func @main(%arg0: tensor<10xf32>) -> tensor<5xf32> {
    [all …]

/external/tensorflow/tensorflow/compiler/mlir/xla/transforms/

legalize_tf.cc:
     7  http://www.apache.org/licenses/LICENSE-2.0
    32  #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"  // from @llvm-project
    33  #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    34  #include "mlir/Dialect/Shape/IR/Shape.h"  // from @llvm-project
    35  #include "mlir/Dialect/Tensor/IR/Tensor.h"  // from @llvm-project
    36  #include "mlir/Dialect/Traits.h"  // from @llvm-project
    37  #include "mlir/IR/Attributes.h"  // from @llvm-project
    38  #include "mlir/IR/BuiltinOps.h"  // from @llvm-project
    39  #include "mlir/IR/BuiltinTypes.h"  // from @llvm-project
    40  #include "mlir/IR/Diagnostics.h"  // from @llvm-project
    [all …]

/external/tensorflow/

RELEASE.md:
    57  * tf.einsum is supported with multiple unknown shapes.
    78  …xperimental/SharpnessAwareMinimization). This class implements the sharpness-aware minimization te…
    82  * Added support for cross-trainer data caching in tf.data service. This saves computation resour…
    91  …mptionCheckpointHandler) to handle worker preemption/maintenance and cluster-wise consistent error…
    95  … to 100 times depending on the size of k. When running on CPU and GPU, a non-optimized XLA kernel …
   117  …inistically (and `tf.errors.UnimplementedError` is no longer thrown) when op-determinism has been …
   141  …-Review-Doctor, DEKHTIARJonathan, Deven Desai, Djacon, Duncan Riach, fedotoff, fo40225, Frederic B…
   163  …ers should switch to [TensorFlow Decision Forests](https://github.com/tensorflow/decision-forests).
   166  …eels now specifically conform to [manylinux2014](https://peps.python.org/pep-0599/), an upgrade fr…
   167  … Forum thread](https://discuss.tensorflow.org/t/tensorflow-linux-wheels-are-being-upgraded-to-many…
    [all …]

/external/tensorflow/tensorflow/compiler/mlir/xla/tests/

legalize-tf.mlir:
     1  // RUN: xla-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=false" -split-input-file %…
     2  …-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=true" -split-input-file -verify-diag…
    10  //===----------------------------------------------------------------------===//
    12  //===----------------------------------------------------------------------===//
    14  // -----
    19  // CHECK-LABEL: fusedBatchNormV2_noTraining
    20  …r<8xf32>, %arg2: tensor<8xf32>, %arg3: tensor<8xf32>, %arg4: tensor<8xf32>) -> (tensor<8x8x8x8xf32…
    21  …ilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<…
    22  …r<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) -> (tensor<8x8x8x8xf32…
    26  // -----
    [all …]

legalize-tf-no-tf2xla-fallback.mlir:
     1  // RUN: xla-opt "-xla-legalize-tf-no-fallback=allow-partial-conversion" -split-input-file %s | FILE…
     3  //===----------------------------------------------------------------------===//
     5  //===----------------------------------------------------------------------===//
     7  // -----
    12  // CHECK-LABEL: fusedBatchNormV2_noTraining
    13  …r<8xf32>, %arg2: tensor<8xf32>, %arg3: tensor<8xf32>, %arg4: tensor<8xf32>) -> (tensor<8x8x8x8xf32…
    14  …ilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<…
    15  …r<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) -> (tensor<8x8x8x8xf32…
    19  // -----
    21  // CHECK-LABEL: fusedBatchNormV2_training
    [all …]
|