
Searched full:symint (Results 1 – 25 of 573) sorted by relevance


/external/pytorch/c10/core/
SymInt.h
21 // SymInt represents either a regular int64_t, or a symbolic integer
22 // (represented in a type erased way as SymNode). The intention is for SymInt
27 // SymInt has an API equivalent to int64_t. In particular, it is a value type.
28 // Internally, SymInt is represented in a clever packed way, so that it only
35 class C10_API SymInt {
41 /*implicit*/ SymInt(int64_t d) : data_(d) { in SymInt() function
47 SymInt() : data_(0) {} in SymInt() function
48 SymInt(SymNode n);
51 // One appropriate use for this is when you are constructing a symint
54 SymInt(Unchecked, int64_t d) : data_(d) {} in SymInt() function
[all …]
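The header's comment promises a "clever packed" layout: one 64-bit word holds either an inline integer or a tagged reference to a heap-allocated SymNode. A loose Python sketch of the tag-bit idea (illustration only; the real c10::SymInt reserves different bits and also stores negative inline values):

```python
# Conceptual sketch of the packed representation described above; the
# real c10::SymInt encoding differs in which bits it reserves.
TAG = 1 << 63            # assumption: one high tag bit, for illustration
PAYLOAD_MASK = TAG - 1

def pack_inline(value: int) -> int:
    # inline case: small non-negative ints are stored directly in the word
    assert 0 <= value < TAG
    return value

def pack_node(node_id: int) -> int:
    # symbolic case: tag the word and stash an identifier for the SymNode
    return TAG | (node_id & PAYLOAD_MASK)

def is_heap_allocated(data: int) -> bool:
    return bool(data & TAG)

assert not is_heap_allocated(pack_inline(42))
assert is_heap_allocated(pack_node(7))
```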
SymInt.cpp
3 #include <c10/core/SymInt.h>
13 // SymInt has temporarily violated invariants
14 // Postcondition: invariants on SymInt are fixed
15 void SymInt::promote_to_negative() { in promote_to_negative()
17 SymInt(SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(data_))); in promote_to_negative()
23 SymNode SymInt::toSymNode() const { in toSymNode()
25 is_heap_allocated(), "SymInt::toSymNode is_heap_allocated"); in toSymNode()
29 SymInt::SymInt(SymNode sin_sp) { in SymInt() function in c10::SymInt
31 sin_sp->is_int(), "SymInt::SymInt sin_sp->is_int()"); in SymInt()
38 bool SymInt::has_hint() const { in has_hint()
[all …]
SymIntArrayRef.h
3 #include <c10/core/SymInt.h>
12 using SymIntArrayRef = ArrayRef<SymInt>;
26 for (const c10::SymInt& sci : ar) { in asIntArrayRefSlowOpt()
39 for (const c10::SymInt& sci : ar) { in asIntArrayRefSlow()
71 reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size()); in fromIntArrayRefUnchecked()
81 SymInt::check_range(i), in fromIntArrayRefSlow()
82 "IntArrayRef contains an int that cannot be represented as a SymInt: ", in fromIntArrayRefSlow()
86 reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size()); in fromIntArrayRefSlow()
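asIntArrayRefSlow converts a SymIntArrayRef back to concrete ints and errors when that is impossible. A loose Python analog over torch.SymInt (assumption: erroring on unbacked symbols and guarding on hinted ones is an acceptable stand-in for the C++ behavior):

```python
from typing import Sequence, Union
import torch

def as_int_list(xs: Sequence[Union[int, torch.SymInt]]) -> list[int]:
    # fail loudly on symbolic elements that have no concrete hint,
    # instead of silently producing garbage
    out = []
    for x in xs:
        if isinstance(x, torch.SymInt) and not x.node.has_hint():
            raise RuntimeError(f"cannot convert unbacked SymInt {x} to int")
        out.append(int(x))  # int() on a hinted SymInt installs a guard
    return out
```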
/external/pytorch/torchgen/api/types/
signatures.py
38 # Is this a symint C++ signature. For BC reasons, functions that take
39 # SymInts still present as int64_t in C++, and the SymInt variant is
42 # NB: If a function RETURNS a SymInt, this is ALWAYS false
43 symint: bool
61 symint=self.symint,
70 symint_overload=False if suppress_symint_suffix else self.symint,
86 self.func.returns, symint=self.symint
106 self.func.returns, symint=self.symint
118 …return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str}…
123 … return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
[all …]
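The BC rule in the comment above surfaces in the C++ API as a naming convention: the int64_t signature keeps the plain name, and the SymInt-taking variant is exposed with a `_symint` suffix (e.g. at::as_strided vs. at::as_strided_symint). A hypothetical condensation of that rule:

```python
def cpp_overload_name(base_name: str, *, symint_overload: bool,
                      suppress_symint_suffix: bool = False) -> str:
    # hypothetical sketch of the suffixing rule in signatures.py: the
    # BC-preserving int64_t signature keeps the plain name, while the
    # SymInt-taking variant gains a "_symint" suffix
    if symint_overload and not suppress_symint_suffix:
        return f"{base_name}_symint"
    return base_name

assert cpp_overload_name("as_strided", symint_overload=True) == "as_strided_symint"
assert cpp_overload_name("as_strided", symint_overload=False) == "as_strided"
```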
/external/pytorch/torchgen/api/
cpp.py
99 symint: bool = False,
104 elif str(t) == "SymInt":
105 if symint:
117 elem = valuetype_type(t.elem, binds=binds, mutable=mutable, symint=symint)
141 symint: bool = False,
148 symint=symint,
178 elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
179 if symint:
183 elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
192 if str(t.elem) == "SymInt":
[all …]
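The `symint` flag threads through these helpers to decide how schema `SymInt` types lower to C++. A condensed sketch of the SymInt-relevant choices (assumption: simplified from the NamedCType machinery in cpp.py):

```python
# condensed sketch of the dispatch in torchgen/api/cpp.py; the real code
# builds NamedCType objects, this only shows the SymInt-relevant branches
def cpp_type_for(schema_type: str, *, symint: bool) -> str:
    mapping = {
        ("SymInt", True): "c10::SymInt",
        ("SymInt", False): "int64_t",        # legacy/BC signature
        ("SymInt[]", True): "c10::SymIntArrayRef",
        ("SymInt[]", False): "at::IntArrayRef",
    }
    return mapping.get((schema_type, symint), schema_type)

assert cpp_type_for("SymInt[]", symint=True) == "c10::SymIntArrayRef"
```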
native.py
43 # NB: this is symint aware, you will get the non-SymInt variant for some
44 # dispatch entries and SymInt for others.
58 t: Type, *, mutable: bool, binds: ArgName, symint: bool
74 return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)
77 def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
78 return cpp.returns_type(rs, symint=symint)
81 def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
82 return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint)
89 symint: bool,
100 default = cpp.default_expr(a.default, a.type, symint=symint)
[all …]
dispatcher.py
45 symint: bool = True,
55 symint=symint,
65 symint: bool = True,
72 symint=symint,
76 def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
78 return cpp.returns_type(rs, symint=symint)
105 a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
112 symint=symint,
119 def arguments(func: FunctionSchema, *, symint: bool = True) -> list[Binding]:
120 return [argument(a, symint=symint) for a in jit_arguments(func)]
lazy.py
71 typ: Type, properties: LazyIrProperties, *, symint: bool
101 elif typ.name == BaseTy.SymInt:
102 if symint:
123 return OptionalCType(process_ir_type(typ.elem, properties, symint=symint))
131 elif typ.elem == BaseType(BaseTy.SymInt):
133 # the problem with tensorListValueT: if you have SymInt[] you
142 return VectorCType(process_ir_type(typ.elem, properties, symint=symint))
175 return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt
210 # TODO: this is a lie; it is false for symint lists
213 # Whether or not we are treating this as symint or not
[all …]
/external/pytorch/test/custom_operator/
test_infer_schema_annotation.py
34 self.assertEqual(result, "(SymInt x) -> SymInt")
46 self.assertEqual(result, "(str x) -> SymInt")
65 self.assertEqual(result, "(ScalarType x) -> SymInt")
71 self.assertEqual(result, "(Device x) -> SymInt")
78 self.assertEqual(result, "(SymInt? x) -> SymInt")
84 self.assertEqual(result, "(SymInt[] x) -> SymInt")
90 self.assertEqual(result, "(SymInt[] x) -> SymInt")
96 self.assertEqual(result, "(SymInt[]? x) -> SymInt")
102 self.assertEqual(result, "(SymInt[]? x) -> SymInt")
139 self.assertEqual(result, "(SymInt[] x) -> Scalar")
[all …]
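These assertions show that schema inference maps a plain Python `int` annotation to `SymInt`, which keeps custom ops traceable with dynamic shapes. A minimal repro using the public torch.library.infer_schema (available in recent releases; older builds keep the helper under torch._library):

```python
import torch

def add_one(x: int) -> int:
    return x + 1

# A Python `int` annotation is inferred as `SymInt` in the schema,
# matching the first assertion above.
print(torch.library.infer_schema(add_one, mutates_args=()))
# expected: "(SymInt x) -> SymInt"
```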
/external/pytorch/torch/csrc/autograd/
python_variable_indexing.h
3 #include <c10/core/SymInt.h>
12 c10::SymInt start;
13 c10::SymInt stop;
14 c10::SymInt step;
22 c10::SymInt start_sym, stop_sym, step_sym; in __PySlice_Unpack()
25 if (val < c10::SymInt::min_representable_int()) { in __PySlice_Unpack()
35 return (Py_ssize_t)(c10::SymInt::min_representable_int()); in __PySlice_Unpack()
41 step_sym = c10::SymInt(1); in __PySlice_Unpack()
44 step_sym = py::handle(r->step).cast<c10::SymInt>(); in __PySlice_Unpack()
55 step_sym = c10::SymInt(step); in __PySlice_Unpack()
[all …]
/external/executorch/exir/
sym_util.py
17 def eval_expr(symint: Union[int, torch.SymInt]) -> Optional[int]: argument
19 Evaluate a symint to int. Returns None if the symint's symbolic expr
22 if isinstance(symint, int):
23 return symint
24 node = symint.node
34 def eval_upper_bound(maybe_symint: Union[int, torch.SymInt]) -> int: argument
36 Evaluate a symint to its upper bound value. Returns None if the symint's symbolic expr's
69 def eval_shape(shape: Iterable[Union[int, torch.SymInt]]): # pyre-ignore[3] argument
80 def eval_shape_upper_bound(shape: Iterable[Union[int, torch.SymInt]]) -> List[int]: argument
88 shape: Iterable[Union[int, torch.SymInt]] argument
[all …]
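A plausible completion of the elided `eval_expr` body, consistent with its docstring (assumption: a SymInt evaluates to an int only when its underlying sympy expression is already constant):

```python
from typing import Optional, Union
import torch

def eval_expr(symint: Union[int, torch.SymInt]) -> Optional[int]:
    """Evaluate a symint to int. Returns None if its symbolic expr
    is not constant."""
    if isinstance(symint, int):
        return symint                 # plain ints pass straight through
    node = symint.node                # the SymNode behind the SymInt
    if node.expr.is_number:           # sympy: expression is a constant
        return int(node.expr)
    return None
```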
/external/executorch/docs/source/
compiler-backend-dialect.md
136 * `executorch_prims::add.int(SymInt a, SymInt b) -> SymInt`
139 * `executorch_prims::mul.int(SymInt a, SymInt b) -> SymInt`
142 * `executorch_prims::sub.int(SymInt a, SymInt b) -> SymInt`
145 * `executorch_prims::floordiv.int(SymInt a, SymInt b) -> SymInt`
154 * `executorch_prims::gt.int(SymInt a, SymInt b) -> bool`
157 * `executorch_prims::lt.int(SymInt a, SymInt b) -> bool`
160 * `executorch_prims::ge.int(SymInt a, SymInt b) -> bool`
163 * `executorch_prims::le.int(SymInt a, SymInt b) -> bool`
166 * `executorch_prims::eq.int(SymInt a, SymInt b) -> bool`
169 * `executorch_prims::mod.Scalar(SymInt a, SymInt b) -> SymInt`
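These prims mirror Python's scalar operators over SymInts. An illustrative mapping from operator functions to the ops above (the authoritative pattern-to-op bindings live in exir/passes/executorch_prim_ops_registry.py, quoted later in these results):

```python
import operator

# illustrative only; see executorch_prim_ops_registry.py for the
# real bind_pattern_to_op registrations
_SYM_OP_TO_PRIM = {
    operator.add: "executorch_prims::add.int",
    operator.mul: "executorch_prims::mul.int",
    operator.sub: "executorch_prims::sub.int",
    operator.floordiv: "executorch_prims::floordiv.int",
    operator.mod: "executorch_prims::mod.Scalar",
    operator.gt: "executorch_prims::gt.int",
    operator.lt: "executorch_prims::lt.int",
    operator.ge: "executorch_prims::ge.int",
    operator.le: "executorch_prims::le.int",
    operator.eq: "executorch_prims::eq.int",
}
```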
/external/pytorch/aten/src/ATen/
TensorGeometry.h
24 c10::SymInt expected_stride = 1; in TensorGeometry()
74 c10::SymInt sym_size(int64_t dim) const { in sym_size()
81 c10::SymInt sym_stride(int64_t dim) const { in sym_stride()
88 c10::SymInt sym_storage_offset() const { in sym_storage_offset()
91 c10::SymInt sym_numel() const { in sym_numel()
116 std::vector<c10::SymInt>& mutable_sizes() { in mutable_sizes()
119 std::vector<c10::SymInt>& mutable_strides() { in mutable_strides()
122 c10::SymInt& mutable_storage_offset() { in mutable_storage_offset()
127 c10::SymInt numel = 1; in recompute()
137 std::vector<c10::SymInt> sizes_;
[all …]
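The `expected_stride = 1` line begins the standard row-major contiguity walk, done in SymInt arithmetic so it also works for symbolic shapes. The same check over plain ints:

```python
def is_contiguous(sizes: list[int], strides: list[int]) -> bool:
    # walk dims from innermost to outermost, accumulating the stride a
    # contiguous tensor would have; size-1 dims are exempt from the check
    expected = 1
    for size, stride in zip(reversed(sizes), reversed(strides)):
        if size != 1 and stride != expected:
            return False
        expected *= size
    return True

assert is_contiguous([2, 3, 4], [12, 4, 1])
assert not is_contiguous([2, 3, 4], [12, 1, 3])
```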
TensorIndexing.h
7 #include <c10/core/SymInt.h>
27 constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int();
30 enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor }; enumerator
42 std::optional<c10::SymInt> start_index = std::nullopt,
43 std::optional<c10::SymInt> stop_index = std::nullopt,
44 std::optional<c10::SymInt> step_index = std::nullopt) {
46 step_ = c10::SymInt(1);
56 start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
62 stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
68 inline c10::SymInt start() const { in start()
[all …]
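Slice fills omitted bounds the way Python slices do: a missing step defaults to 1, and missing start/stop snap to whichever extreme matches the step's sign. In Python terms (the constants are stand-ins for the INDEX_MIN/INDEX_MAX shown above, whose exact values come from SymInt::min_representable_int):

```python
INDEX_MAX = 2**62 - 1   # assumption: stand-ins for the C++ constants
INDEX_MIN = -INDEX_MAX

def normalize_slice(start=None, stop=None, step=None):
    # mirrors the defaulting logic of Slice in TensorIndexing.h
    step = 1 if step is None else step
    if start is None:
        start = INDEX_MAX if step < 0 else 0
    if stop is None:
        stop = INDEX_MIN if step < 0 else INDEX_MAX
    return start, stop, step

assert normalize_slice() == (0, INDEX_MAX, 1)
assert normalize_slice(step=-1) == (INDEX_MAX, INDEX_MIN, -1)
```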
/external/pytorch/tools/autograd/
derivatives.yaml
279 - name: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
338 - name: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> …
342 - name: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -…
567 - name: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2)…
655 # TODO: this derivative is not SymInt safe, need sum_to support
656 - name: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
1076 - name: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
1224 - name: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, fl…
1228 - name: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor…
1234 - name: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt Hx…
[all …]
gen_python_functions.py
268 symint: bool = True,
284 symint=symint,
298 symint=symint,
308 symint=symint,
318 symint=symint,
328 symint=symint,
347 symint=symint,
357 symint=symint,
406 symint: bool = True,
419 method_impl(name, module, overloads, method=method, symint=symint)
[all …]
/external/pytorch/torch/_inductor/
inductor_prims.py
72 "inductor_random(SymInt[] size, Tensor seed, str mode) -> Tensor",
77 "inductor_randint(SymInt low, SymInt high, SymInt[] size, Tensor seed) -> Tensor",
82 "inductor_force_stride_order(Tensor input, SymInt[] stride) -> Tensor",
169 …emory_max_pool2d_with_offsets(Tensor self, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] pad…
176 …mory_max_pool2d_offsets_to_indices(Tensor self, SymInt kernel_w, SymInt input_w, SymInt[2] stride,…
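Inductor's private prims declare `SymInt` and `SymInt[]` parameters directly in schema strings. Any library can do the same through torch.library; a sketch with a hypothetical `demo` namespace:

```python
import torch

# hypothetical op: the "demo" namespace and name are illustrative only
torch.library.define("demo::fill_size", "(SymInt[] size, float value) -> Tensor")

@torch.library.impl("demo::fill_size", "CompositeExplicitAutograd")
def fill_size(size, value):
    # `size` arrives as plain ints eagerly and as SymInts under tracing
    return torch.full(size, value)

print(torch.ops.demo.fill_size([2, 3], 0.5).shape)  # torch.Size([2, 3])
```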
/external/pytorch/aten/src/ATen/native/
native_functions.yaml
190 - func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? d…
242 …_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size,
247 …cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropou…
256 …cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropou…
663 - func: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
669 - func: affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
931 - func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> …
942 - func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -…
1364 - func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1469 - func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
[all …]
/external/executorch/backends/vulkan/runtime/graph/containers/
SymInt.cpp
9 #include <executorch/backends/vulkan/runtime/graph/containers/SymInt.h>
13 SymInt::SymInt(api::Context* context_p, const int32_t val) in SymInt() function in vkcompute::SymInt
16 void SymInt::set(const int32_t val) { in set()
20 int32_t SymInt::get() { in get()
24 void SymInt::operator=(const int32_t val) { in operator =()
/external/pytorch/torch/_subclasses/
_fake_tensor_utils.py
7 from torch import SymInt
80 Represents a SymInt, SymFloat, SymBool without the associated ShapeEnv
116 Represents a SymInt in the cached key. Needed because SymInt doesn't
121 # PySymType: This is the 'normal' SymInt value, wrapped so we can use
122 # hash/eq as value hash/eq (normally SymInt does object
179 Represents a SymInt in the cached output.
186 def __init__(self, value: SymInt, key_path: Optional[int]) -> None: argument
192 def extract(self, key: _DispatchCacheKey, shape_env: ShapeEnv) -> SymInt:
194 return SymInt(self.value.extract(shape_env))
197 assert isinstance(src, _PySymInputStub) and isinstance(src.value, SymInt)
[all …]
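The stub exists because `SymInt.__eq__` on symbolic values yields a SymBool rather than a plain bool, so SymInts make poor dict keys. A hypothetical illustration of the wrap-for-value-hashing idea:

```python
import torch

class SymKey:
    # hypothetical stand-in for _PySymInputStub: key on the underlying
    # sympy expression, whose __eq__/__hash__ behave like ordinary values
    def __init__(self, sym: torch.SymInt) -> None:
        self.expr = sym.node.expr

    def __hash__(self) -> int:
        return hash(self.expr)

    def __eq__(self, other: object) -> bool:
        return isinstance(other, SymKey) and self.expr == other.expr
```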
/external/executorch/backends/cadence/aot/
ops_registrations.py
54 …Tensor weight, Tensor bias, SymInt src_zero_point, SymInt weight_zero_point, SymInt out_multiplier…
57 "quantized_linear.per_tensor(Tensor src, Tensor weight, Tensor bias, SymInt src_zero_point, "
58 …"SymInt weight_zero_point, SymInt out_multiplier, SymInt out_shift, SymInt out_zero_point, Tensor?…
69 …"quantized_conv(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] di…
72 …"quantized_conv.out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[…
75 …"quantized_conv.per_tensor(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] paddin…
78 …"quantized_conv.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] pa…
89 "convolution(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, "
93 …"transposed_convolution(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, "
94 "int[] dilation, SymInt[] output_padding, int groups, bool channel_last=False) -> (Tensor Y)"
[all …]
/external/executorch/exir/passes/
executorch_prim_ops_registry.py
15 from torch import SymBool, SymFloat, SymInt
24 _SymScalar = Union[SymBool, SymFloat, SymInt]
88 @bind_pattern_to_op(executorch_prims_lib, "mod.Scalar(SymInt a, SymInt b) -> SymInt")
89 def mod(a: SymInt, b: SymInt) -> SymInt: argument
90 return SymInt(int(a) % int(b))
/external/pytorch/torch/_library/
fake_impl.py
130 def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
133 def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
134 """Constructs a new symint (symbolic int) representing a data-dependent value.
141 min (int): A statically known inclusive lower bound for this symint. Default: 0
143 symint. Default: None
154 to the symint also respects these constraints.
168 >>> # we use the ctx object to construct a new symint that
189 if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
192 f"min and max to be statically known ints but got SymInt. "
/external/pytorch/torch/
types.py
28 SymInt as SymInt,
50 _symsize: TypeAlias = Union[Size, Sequence[Union[int, SymInt]]] # noqa: PYI042,PYI047
53 # int or SymInt
54 IntLikeType: TypeAlias = Union[int, SymInt]
60 py_sym_types = (SymInt, SymFloat, SymBool)
61 PySymType: TypeAlias = Union[SymInt, SymFloat, SymBool]
/external/pytorch/c10/test/core/
SymInt_test.cpp
3 #include <c10/core/SymInt.h>
10 const auto i = SymInt(value); in check()
23 EXPECT_FALSE(SymInt::check_range(INT64_MIN)); in TEST()
29 const auto x = SymInt(INT64_MAX); in TEST()
32 const auto y = SymInt(INT64_MIN); in TEST()
