
Searched full:bias (Results 1 – 25 of 4720) sorted by relevance


/external/pytorch/torch/testing/_internal/
common_pruning.py
57 nn.Linear(7, 5, bias=False),
58 nn.Linear(5, 6, bias=False),
59 nn.Linear(6, 4, bias=False),
61 self.linear1 = nn.Linear(4, 4, bias=False)
62 self.linear2 = nn.Linear(4, 10, bias=False)
73 wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion."""
78 nn.Linear(7, 5, bias=True),
79 nn.Linear(5, 6, bias=False),
80 nn.Linear(6, 3, bias=True),
81 nn.Linear(3, 3, bias=True),
[all …]
/external/pytorch/torch/nn/modules/
linear.py
60 bias: If set to ``False``, the layer will not learn an additive bias.
74 bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
75 If :attr:`bias` is ``True``, the values are initialized from
97 bias: bool = True,
108 if bias:
109 self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
111 self.register_parameter("bias", None)
119 if self.bias is not None:
122 init.uniform_(self.bias, -bound, bound)
125 return F.linear(input, self.weight, self.bias)
[all …]
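
The linear.py docstring above documents the `bias` flag of torch.nn.Linear: when it is False, no additive bias is learned and the attribute is registered as None. A minimal sketch of the two modes:

    import torch
    import torch.nn as nn

    # bias=True (default): a learnable 1-D bias of shape (out_features,).
    # bias=False: the "bias" parameter is registered as None instead.
    with_bias = nn.Linear(4, 3)
    no_bias = nn.Linear(4, 3, bias=False)

    x = torch.randn(2, 4)
    y = with_bias(x)  # computes x @ weight.T + bias
    z = no_bias(x)    # computes x @ weight.T only
    assert with_bias.bias.shape == (3,) and no_bias.bias is None
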
/external/pytorch/aten/src/ATen/native/xnnpack/
Linear.cpp
17 const std::optional<Tensor>& bias, in available() argument
27 // Bias in available()
28 ((bias && bias->defined()) ? ((1 == bias->ndimension()) && in available()
29 (bias->device().is_cpu()) && in available()
30 (kFloat == bias->scalar_type()) && in available()
31 (weight.size(Layout::Filter::output)) == bias->size(0) && in available()
32 !bias->requires_grad()) in available()
52 const Tensor& bias, in create_and_run() argument
58 bias, in create_and_run()
68 const std::optional<Tensor>& bias, in create() argument
[all …]
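
The available() predicate above accepts an optional bias only if it is defined, 1-D, on CPU, float, sized to the weight's output dimension, and does not require grad. A rough Python paraphrase of those conditions (the helper name is made up for illustration):

    import torch

    def xnnpack_bias_usable(weight: torch.Tensor, bias) -> bool:
        # A missing bias is always acceptable; a present one must pass
        # the same checks that Linear.cpp's available() applies.
        if bias is None:
            return True
        return (bias.ndimension() == 1
                and bias.device.type == "cpu"
                and bias.dtype == torch.float32
                and bias.size(0) == weight.size(0)  # Layout::Filter::output
                and not bias.requires_grad)
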
/external/arm-trusted-firmware/fdts/
stm32mp15-pinctrl.dtsi
24 bias-disable;
30 bias-pull-up;
38 bias-disable;
47 bias-disable;
59 bias-disable;
65 bias-pull-up;
77 bias-disable;
83 bias-pull-up;
98 bias-disable;
104 bias-disable;
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gemm_rewriter.cc
64 // If the bias is a sequence of ops that depend only on broadcasts of
65 // constants, materialize the bias if it's small.
67 // Normally the constant-folding pass would materialize the bias if it is
68 // calculated entirely from constants. But if the bias is a broadcast of a
80 // broadcasted bias, if it supports that fusion efficiently.
81 HloInstruction *MaybeConstantFoldBias(HloInstruction *bias) { in MaybeConstantFoldBias() argument
97 if (ShapeUtil::ByteSizeOf(bias->shape()) <= kMaxMaterializeBiasBytes && in MaybeConstantFoldBias()
98 (Match(bias, broadcast_of_nonscalar) || in MaybeConstantFoldBias()
99 Match(bias, m::Reshape(broadcast_of_nonscalar)) || in MaybeConstantFoldBias()
100 Match(bias, m::Transpose(broadcast_of_nonscalar)) || in MaybeConstantFoldBias()
[all …]
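
MaybeConstantFoldBias above materializes a bias only when it matches a (possibly reshaped or transposed) broadcast of a non-scalar constant and the result stays under a byte limit. A loose numpy sketch of just the size gate (the limit value here is illustrative, not XLA's actual constant):

    import numpy as np

    MAX_MATERIALIZE_BIAS_BYTES = 1 << 20  # illustrative, not XLA's value

    def maybe_materialize_bias(constant, target_shape):
        # Materialize the broadcast only if the result is small; large
        # biases are left symbolic for the backend to fuse instead.
        result = np.broadcast_to(np.asarray(constant), target_shape)
        if result.nbytes <= MAX_MATERIALIZE_BIAS_BYTES:
            return np.ascontiguousarray(result)  # actually allocate it
        return None
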
/external/pytorch/test/inductor/
test_cpu_select_algorithm.py
152 @parametrize("bias", (True, False))
156 self, batch_size, in_features, out_features, bias, input_3d, dtype argument
159 def __init__(self, bias): argument
161 self.linear = torch.nn.Linear(in_features, out_features, bias)
167 mod = M(bias=bias).to(dtype=dtype).eval()
187 @parametrize("bias", (True,))
191 def test_linear_wgt_multi_users(self, in_features, out_features, bias, dtype): argument
193 def __init__(self, bias): argument
196 self.linear = torch.nn.Linear(in_features, out_features, bias)
204 mod = M(bias=bias).to(dtype=dtype).eval()
[all …]
/external/openscreen/cast/streaming/
expanded_value_base_unittest.cc
58 for (int64_t bias = -5; bias <= 5; ++bias) { in TEST() local
60 const TestValue original_value(bias + i); in TEST()
62 const TestValue reference(bias); in TEST()
64 << "bias=" << bias << ", i=" << i; in TEST()
71 for (int64_t bias = -5; bias <= 5; ++bias) { in TEST() local
77 const TestValue original_value(bias + i); in TEST()
79 const TestValue reexpanded_value(bias + i - 256); in TEST()
81 const TestValue reference(bias); in TEST()
83 << "bias=" << bias << ", i=" << i; in TEST()
90 for (int64_t bias = -5; bias <= 5; ++bias) { in TEST() local
[all …]
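
The test above sweeps a reference value (`bias`) and checks that values truncated to 8 bits re-expand to the full value closest to that reference (note the `bias + i - 256` reexpansion case). A hedged sketch of that expansion rule, with names of my own choosing:

    def expand_8bit(truncated: int, reference: int) -> int:
        # Choose, among all values congruent to `truncated` mod 256,
        # the one closest to `reference`.
        assert 0 <= truncated < 256
        base = reference - (reference % 256)  # multiple of 256 at/below ref
        candidates = (base - 256 + truncated,
                      base + truncated,
                      base + 256 + truncated)
        return min(candidates, key=lambda v: abs(v - reference))
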
/external/trusty/arm-trusted-firmware/fdts/
stm32mp15-pinctrl.dtsi
25 bias-disable;
31 bias-pull-up;
40 bias-disable;
50 bias-disable;
63 bias-disable;
76 bias-disable;
86 bias-pull-up;
96 bias-pull-up;
112 bias-disable;
118 bias-disable;
[all …]
/external/pytorch/torch/ao/nn/intrinsic/qat/modules/
conv_fused.py
55 bias, argument
87 if bias:
88 self.bias = Parameter(torch.empty(out_channels))
90 self.register_parameter("bias", None)
111 init.zeros_(self.bn.bias)
113 if self.bias is not None:
116 init.uniform_(self.bias, -bound, bound)
150 # using zero bias here since the bias for original conv
152 if self.bias is not None:
153 zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
[all …]
linear_fused.py
38 bias=True, argument
49 nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
55 if bias:
56 self.bias = Parameter(torch.empty(out_features))
58 self.register_parameter("bias", None)
77 init.zeros_(self.bn.bias)
108 # # do the linear transformation without bias
110 # # reverse the scaling and add original bias
123 if self.bias is not None:
124 zero_bias = torch.zeros_like(self.bias)
[all …]
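
The commented-out lines above (conv_fused.py earlier uses the same zero-bias trick) describe how QAT simulates a Linear folded with BatchNorm: scale the weight by the BN factor, run the linear with a zero bias, then reverse the scaling and add the original bias. A simplified sketch of just those steps (fake-quant and BN statistics handling omitted):

    import torch
    import torch.nn.functional as F

    def folded_linear_forward(x, weight, bias, bn_gamma, bn_var, eps=1e-5):
        scale = bn_gamma / torch.sqrt(bn_var + eps)  # (out_features,)
        scaled_weight = weight * scale.unsqueeze(1)  # fold BN into weight
        y = F.linear(x, scaled_weight)               # linear without bias
        y = y / scale                                # reverse the scaling
        if bias is not None:
            y = y + bias                             # add original bias
        return y
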
/external/pytorch/torch/ao/nn/quantized/modules/
normalization.py
27 bias, argument
43 self.bias = bias
52 bias=self.bias,
67 mod.bias,
80 mod.bias,
103 bias, argument
114 self.bias = bias
123 self.bias,
139 mod.bias,
161 bias, argument
[all …]
/external/pytorch/test/
test_stateless.py
32 self.tied_bias = self.l1.bias
45 bias = torch.tensor([0.0], device=device)
49 f'{prefix}.l1.bias': bias,
53 'l1.bias': bias,
157 bias = torch.tensor([0.0], requires_grad=True)
160 'l1.bias': bias,
166 self.assertIsNotNone(bias.grad)
170 self.assertIsNone(module.l1.bias.grad)
204 bias = torch.tensor([0.0])
207 'l1.bias': bias,
[all …]
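
The stateless tests above run a module with substitute parameters (including the bias) and assert that gradients reach the substitutes rather than module.l1.bias. The public API for this is torch.func.functional_call; a minimal example:

    import torch
    import torch.nn as nn
    from torch.func import functional_call

    module = nn.Linear(1, 1)
    weight = torch.tensor([[1.0]], requires_grad=True)
    bias = torch.tensor([0.0], requires_grad=True)

    # Run with replacement parameters; `module` itself is not mutated.
    out = functional_call(module, {'weight': weight, 'bias': bias},
                          (torch.ones(1, 1),))
    out.sum().backward()
    assert bias.grad is not None     # gradient flows to the substitute
    assert module.bias.grad is None  # original parameter is untouched
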
test_mkldnn_fusion.py
65 def __init__(self, in_channels, out_channels, bias, **kwargs): argument
67 self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
82 for bias, dilation, groups in options:
87 bias,
104 def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs): argument
106 self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
119 for bias in [True, False]:
121 … m = M(unary_fn, 3, oC, bias, kernel_size=(3, 3)).to(memory_format=memory_format)
133 def __init__(self, m, in_channels, out_channels, bias, **kwargs): argument
135 self.conv = m(in_channels, out_channels, bias=bias, **kwargs)
[all …]
/external/pytorch/torch/ao/pruning/_experimental/pruner/
prune_functions.py
4 Also contains utilities for bias propagation
16 # BIAS PROPAGATION
31 r"""Returns new adjusted bias for the second supported module"""
44 # Propagating first layer pruned biases and calculating the new second layer bias
46 # so adding bias involves broadcasting, logically:
67 ): # next_layer is parametrized & has original bias ._bias
70 not parametrize.is_parametrized(next_layer) and next_layer.bias is not None
71 ): # next_layer not parametrized & has .bias
72 adjusted_bias = nn.Parameter(scaled_biases + next_layer.bias)
73 else: # next_layer has no bias
[all …]
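
The bias-propagation comments above say that when a layer's output channels are pruned, the biases of those channels are pushed through the next layer's weight and folded into its bias. A hedged sketch of the unparametrized branch, for two Linear layers with no nonlinearity between them (function name is mine):

    import torch
    import torch.nn as nn

    def fold_pruned_bias(layer1: nn.Linear, layer2: nn.Linear,
                         pruned_idx: torch.Tensor) -> None:
        # A pruned channel's output degenerates to its bias, so layer2
        # sees a constant: weight2[:, pruned] @ bias1[pruned].
        with torch.no_grad():
            scaled = layer2.weight[:, pruned_idx] @ layer1.bias[pruned_idx]
            if layer2.bias is not None:
                new_bias = scaled + layer2.bias
            else:
                new_bias = scaled
        layer2.bias = nn.Parameter(new_bias)
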
/external/pytorch/test/distributed/_composable/fully_shard/
test_fully_shard_util.py
57 ["l1.weight", "l1.bias", "l2.weight", "l2.bias"],
60 "u1.l1.bias",
62 "u1.seq.1.bias",
64 "u1.l2.bias",
68 "u2.l1.bias",
70 "u2.seq.1.bias",
72 "u2.l2.bias",
93 "l1.bias",
95 "u2.l1.bias",
97 "u2.seq.1.bias",
[all …]
/external/deqp-deps/glslang/Test/
spv.textureGatherBiasLod.frag
16 in float bias;
27 texel += textureGather(s2D, c2, 0, bias);
28 texel += textureGather(s2DArray, c3, 1, bias);
29 texel += textureGather(sCube, c3, 2, bias);
30 texel += textureGather(sCubeArray, c4, 3, bias);
32 texel += textureGatherOffset(s2D, c2, offsets[0], 0, bias);
33 texel += textureGatherOffset(s2DArray, c3, offsets[1], 1, bias);
35 texel += textureGatherOffsets(s2D, c2, offsets, 0, bias);
36 texel += textureGatherOffsets(s2DArray, c3, offsets, 1, bias);
38 sparseTextureGatherARB(s2D, c2, result, 0, bias);
[all …]
/external/angle/third_party/glslang/src/Test/
spv.textureGatherBiasLod.frag
16 in float bias;
27 texel += textureGather(s2D, c2, 0, bias);
28 texel += textureGather(s2DArray, c3, 1, bias);
29 texel += textureGather(sCube, c3, 2, bias);
30 texel += textureGather(sCubeArray, c4, 3, bias);
32 texel += textureGatherOffset(s2D, c2, offsets[0], 0, bias);
33 texel += textureGatherOffset(s2DArray, c3, offsets[1], 1, bias);
35 texel += textureGatherOffsets(s2D, c2, offsets, 0, bias);
36 texel += textureGatherOffsets(s2DArray, c3, offsets, 1, bias);
38 sparseTextureGatherARB(s2D, c2, result, 0, bias);
[all …]
/external/pytorch/torch/nn/attention/
bias.py
2 """Defines bias subclasses that work with scaled_dot_product_attention"""
39 `UPPER_LEFT`: Represents upper-left triangular bias for standard causal attention.
40 The equivalent pytorch code for constructing this bias is:
46 For instance, with `shape=(3,4)`, the materialized bias tensor will be:
55 … `LOWER_RIGHT`: Represents lower-right triangular bias, the included values are aligned to the lower
58 The equivalent pytorch code for constructing this bias is:
68 For instance, with `shape=(3,4)`, the materialized bias tensor will be:
88 …A bias representing causal attention patterns. For an overview of the bias structure, see the :cla…
90 …This class is used for defining causal (triangular) attention biases. For constructing the bias, the…
97 from torch.nn.attention.bias import causal_lower_right
[all …]
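
The docstring above distinguishes UPPER_LEFT from LOWER_RIGHT causal alignment, which differ only when the query and key/value lengths differ. A minimal usage example with the lower-right factory:

    import torch
    import torch.nn.functional as F
    from torch.nn.attention.bias import causal_lower_right

    q = torch.randn(1, 1, 3, 8)  # 3 queries
    k = torch.randn(1, 1, 4, 8)  # 4 keys
    v = torch.randn(1, 1, 4, 8)

    # Causal mask for a (3, 4) score matrix, aligned to the lower right.
    attn_bias = causal_lower_right(3, 4)
    out = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
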
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/
conv_test.cc
41 Tensor<Linear, DataType::FLOAT32> bias; in TEST() local
42 bias.shape.v = 2; in TEST()
43 bias.id = 1; in TEST()
44 bias.data = {1, 1}; in TEST()
45 attr.bias = std::move(bias); in TEST()
79 Tensor<Linear, DataType::FLOAT32> bias; in TEST() local
80 bias.shape.v = 2; in TEST()
81 bias.id = 1; in TEST()
82 bias.data.push_back(0.0); in TEST()
83 attr.bias = std::move(bias); in TEST()
[all …]
transpose_conv_test.cc
41 Tensor<Linear, DataType::FLOAT32> bias; in TEST() local
42 bias.shape.v = 2; in TEST()
43 bias.id = 1; in TEST()
44 bias.data = {1, 1}; in TEST()
45 attr.bias = std::move(bias); in TEST()
80 Tensor<Linear, DataType::FLOAT32> bias; in TEST() local
81 bias.shape.v = 2; in TEST()
82 bias.id = 1; in TEST()
83 bias.data.push_back(0.0); in TEST()
84 attr.bias = std::move(bias); in TEST()
[all …]
/external/pytorch/torch/utils/
mkldnn.py
9 if dense_module.bias is not None:
10 # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy,
12 self.register_buffer('bias', dense_module.bias.to_mkldnn())
16 'bias',
21 return (self.weight.to_dense(), self.bias.to_dense(), self.training)
26 self.bias = state[1].to_mkldnn()
32 y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias)
50 if dense_module.bias is not None:
51 self.register_buffer('bias', dense_module.bias.to_mkldnn())
53 # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy,
[all …]
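
The wrapper above stores the dense module's bias as an MKL-DNN buffer and calls mkldnn_linear in forward. Such wrappers are created with torch.utils.mkldnn.to_mkldnn; a minimal inference-only example (assumes a PyTorch build with MKL-DNN support):

    import torch
    import torch.nn as nn
    from torch.utils import mkldnn as mkldnn_utils

    model = nn.Linear(4, 3).eval()              # inference path only
    mkldnn_model = mkldnn_utils.to_mkldnn(model)

    x = torch.randn(2, 4)
    y = mkldnn_model(x.to_mkldnn()).to_dense()  # convert in and out
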
/external/pytorch/torch/csrc/jit/passes/
metal_rewrite.cpp
27 graph(%input, %weight, %bias): in insertPrePackedLinearOp()
28 %r = aten::linear(%input, %weight, %bias) in insertPrePackedLinearOp()
31 graph(%input, %weight, %bias): in insertPrePackedLinearOp()
34 %weight, %bias, %output_min_max, %output_min_max) in insertPrePackedLinearOp()
47 graph(%input, %weight, %bias, %stride:int[], %padding:int[], %dilation:int[], %groups:int): in insertPrePackedConv2dOp()
48 %r = aten::conv2d(%input, %weight, %bias, %stride, %padding, %dilation, %groups) in insertPrePackedConv2dOp()
52 graph(%input, %weight, %bias, %stride:int[], %padding:int[], in insertPrePackedConv2dOp()
56 %weight, %bias, %stride, %padding, %dilation, %groups, in insertPrePackedConv2dOp()
71 graph(%input, %weight, %bias, %dummy_min_max): in fuseReluWithPackedOps()
75 %weight, %bias, %output_min, %output_max) in fuseReluWithPackedOps()
[all …]
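
The pass above uses TorchScript's subgraph rewriter to swap aten::linear (and a following relu) for prepacked Metal ops. The same pattern-replacement idea is exposed in Python via torch.fx.subgraph_rewriter; a hedged sketch where the "fused" replacement is just a stand-in, not a real backend kernel:

    import torch
    import torch.fx as fx
    import torch.nn.functional as F
    from torch.fx import subgraph_rewriter

    class M(torch.nn.Module):
        def forward(self, x, weight, bias):
            return torch.relu(F.linear(x, weight, bias))

    def pattern(x, weight, bias):
        return torch.relu(F.linear(x, weight, bias))

    def replacement(x, weight, bias):
        # Stand-in for a prepacked fused linear+relu kernel.
        return F.linear(x, weight, bias).clamp_min(0.0)

    gm = fx.symbolic_trace(M())
    subgraph_rewriter.replace_pattern(gm, pattern, replacement)
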
/external/pytorch/torch/ao/nn/quantizable/modules/
activation.py
38 bias: add bias as module parameter. Default: True.
39 add_bias_kv: add bias to the key and value sequences at dim=0.
67 bias: bool = True,
81 bias,
90 self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs
93 self.kdim, self.embed_dim, bias=bias, **factory_kwargs
96 self.vdim, self.embed_dim, bias=bias, **factory_kwargs
99 …self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ig…
138 observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type]
141 bias = other.in_proj_bias
[all …]
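
The quantizable module mirrors nn.MultiheadAttention's bias knobs: `bias` toggles biases on the q/k/v and output projections, while `add_bias_kv` appends learnable bias rows to the key and value sequences. Illustrated on the core module:

    import torch
    import torch.nn as nn

    mha = nn.MultiheadAttention(embed_dim=8, num_heads=2,
                                bias=True,         # projection biases
                                add_bias_kv=True,  # extra K/V bias rows
                                batch_first=True)
    x = torch.randn(2, 5, 8)  # (batch, seq, embed)
    out, attn = mha(x, x, x)  # self-attention
    assert out.shape == (2, 5, 8)
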
/external/tensorflow/tensorflow/lite/kernels/
fully_connected.cc
135 const TfLiteTensor* bias, TfLiteTensor* output, in CheckTypes() argument
144 // optional bias tensor. in CheckTypes()
145 const bool is_optional_bias_float = !bias || (bias->type == kTfLiteFloat32); in CheckTypes()
147 !bias || (bias->type == kTfLiteInt32) || (bias->type == kTfLiteInt64); in CheckTypes()
210 const TfLiteTensor* bias = in PrepareImpl() local
220 CheckTypes(context, input, filter, bias, output, params)); in PrepareImpl()
257 if (bias) { in PrepareImpl()
258 TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0)); in PrepareImpl()
268 context, input, filter, bias, output, &real_multiplier)); in PrepareImpl()
479 const TfLiteTensor* bias, TfLiteTensor* output) { in EvalPie() argument
[all …]
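
CheckTypes and PrepareImpl above enforce that the optional bias is float32 on the float path, int32/int64 on the quantized path, and that its element count equals the filter's first dimension. A rough numpy rendering of those checks (function name is mine):

    import numpy as np

    def check_fully_connected_bias(filter_arr, bias, quantized: bool):
        if bias is None:
            return  # the bias tensor is optional
        if quantized:
            assert bias.dtype in (np.int32, np.int64)
        else:
            assert bias.dtype == np.float32
        # NumElements(bias) == SizeOfDimension(filter, 0)
        assert bias.size == filter_arr.shape[0]
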
/external/tensorflow/tensorflow/compiler/mlir/tfr/examples/mnist/
mnist_ops_test.py
35 bias = tf.zeros([8])
39 'bias': bias,
48 self._assertOpAndComposite([input_, filter_, bias],
55 bias = tf.zeros([8])
59 'bias': bias,
68 self._assertOpAndComposite([input_, filter_, bias],
76 bias = tf.zeros([8])
80 'bias': bias,
89 self._assertOpAndComposite([input_, filter_, bias],
96 bias = tf.zeros([3])
[all …]
