
Searched full:reduced (Results 1 – 25 of 2748) sorted by relevance


/external/libkmsxx/kms++/src/
modedb_dmt.cpp:47 // 0xd - 800 x 600 @ 120Hz CVT (Reduced Blanking)
48 …DRM_MODE("800 x 600 @ 120Hz CVT (Reduced Blanking)", 73250, 800, 48, 32, 80, 600, 3, 4, 29, DRM_MO…
61 // 0x14 - 1024 x 768 @ 120Hz CVT (Reduced Blanking)
62 …DRM_MODE("1024 x 768 @ 120Hz CVT (Reduced Blanking)", 115500, 1024, 48, 32, 80, 768, 3, 4, 38, DRM…
67 // 0x16 - 1280 x 768 @ 60Hz CVT (Reduced Blanking)
68 …DRM_MODE("1280 x 768 @ 60Hz CVT (Reduced Blanking)", 68250, 1280, 48, 32, 80, 768, 3, 7, 12, DRM_M…
75 // 0x1a - 1280 x 768 @ 120Hz CVT (Reduced Blanking)
76 …DRM_MODE("1280 x 768 @ 120Hz CVT (Reduced Blanking)", 140250, 1280, 48, 32, 80, 768, 3, 7, 35, DRM…
77 // 0x1b - 1280 x 800 @ 60Hz CVT (Reduced Blanking)
78 …DRM_MODE("1280 x 800 @ 60Hz CVT (Reduced Blanking)", 71000, 1280, 48, 32, 80, 800, 3, 6, 14, DRM_M…
[all …]
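
As a rough cross-check of these timing entries (a sketch, not part of the source): assuming the fields after the pixel clock are the active size followed by front porch, sync width and back porch, the clock, totals and refresh rate are tied together by clock ≈ htotal × vtotal × refresh.

```python
# Sanity check of the 800x600 @ 120Hz CVT Reduced Blanking entry above
# (field interpretation is an assumption about the DRM_MODE argument order).
clock_khz = 73250
hdisplay, hfront, hsync, hback = 800, 48, 32, 80
vdisplay, vfront, vsync, vback = 600, 3, 4, 29

htotal = hdisplay + hfront + hsync + hback   # 960
vtotal = vdisplay + vfront + vsync + vback   # 636
refresh_hz = clock_khz * 1000 / (htotal * vtotal)
print(round(refresh_hz, 2))                  # ~119.97 Hz, i.e. the nominal 120 Hz mode
```
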
/external/zucchini/
equivalence_map.cc:351 for (auto reduced = current + 1; reduced != next; ++reduced) in PruneEquivalencesAndSortBySource() local
352 reduced->length = 0; in PruneEquivalencesAndSortBySource()
357 for (auto reduced = current + 1; reduced != next; ++reduced) { in PruneEquivalencesAndSortBySource() local
358 offset_t delta = current->src_end() - reduced->src_offset; in PruneEquivalencesAndSortBySource()
359 reduced->length -= std::min(reduced->length, delta); in PruneEquivalencesAndSortBySource()
360 reduced->src_offset += delta; in PruneEquivalencesAndSortBySource()
361 reduced->dst_offset += delta; in PruneEquivalencesAndSortBySource()
362 DCHECK_EQ(reduced->src_offset, current->src_end()); in PruneEquivalencesAndSortBySource()
522 for (auto reduced = current + 1; reduced != next; ++reduced) { in Prune() local
523 reduced->eq.length = 0; in Prune()
[all …]
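
The loop above trims overlapping matches so that each later equivalence starts exactly where the current one ends. A minimal sketch of that trimming step (the `Equivalence` type and field names here are stand-ins, not zucchini's real structs):

```python
from dataclasses import dataclass

@dataclass
class Equivalence:                 # hypothetical stand-in for zucchini's struct
    src_offset: int
    dst_offset: int
    length: int

    def src_end(self) -> int:
        return self.src_offset + self.length

def trim_after(current: Equivalence, overlapping: list[Equivalence]) -> None:
    """Shrink each later equivalence that starts inside `current`,
    mirroring the source loop over [current + 1, next)."""
    for reduced in overlapping:
        delta = current.src_end() - reduced.src_offset  # how far it reaches into `current`
        reduced.length -= min(reduced.length, delta)    # clamp the length at zero
        reduced.src_offset += delta                     # now begins at current.src_end()
        reduced.dst_offset += delta
        assert reduced.src_offset == current.src_end()

cur = Equivalence(src_offset=0, dst_offset=100, length=10)
nxt = Equivalence(src_offset=6, dst_offset=206, length=8)
trim_after(cur, [nxt])
print(nxt)  # Equivalence(src_offset=10, dst_offset=210, length=4)
```
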
/external/mbedtls/library/
ecp_invasive.h:84 * Upon return this holds the reduced value. The bitlength
85 * of the reduced value is the same as that of the modulus
101 * Upon return holds the reduced value which is
103 * The bitlength of the reduced value is the same as
123 * Upon return holds the reduced value which is
125 * The bitlength of the reduced value is the same as
145 * holds the reduced value. The reduced value is
167 * Upon return holds the reduced value which is
169 * The bitlength of the reduced value is the same as
190 * Upon return holds the reduced value which is
[all …]
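
These comments all describe the same contract: after the call, the operand holds its value reduced modulo the curve prime, so it is strictly smaller than the modulus and fits in the same bit length. A tiny illustration of that invariant in plain Python (not the mbedtls bignum API):

```python
# P-256 prime, written out for illustration.
p = 2**256 - 2**224 + 2**192 + 2**96 - 1

x = 123456789 * p + 42          # some value much larger than the modulus
reduced = x % p                 # what the ecp reduction routines leave behind

assert 0 <= reduced < p                        # strictly below the modulus
assert reduced.bit_length() <= p.bit_length()  # same bit length as the modulus
print(reduced)                                 # 42
```
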
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombineInternal.h:31 // and for each eligible dag, it will create a reduced bit-width expression and
36 // 3. Can be evaluated into type with reduced legal bit-width (or Trunc type).
39 // new reduced type chosen in (3).
69 /// The reduced value generated to replace the old instruction.
101 /// Check if it is eligible to be reduced to a smaller type.
103 /// \return the scalar version of the new type to be used for the reduced
105 /// to be reduced.
108 /// Given a \p V value and a \p SclTy scalar type return the generated reduced
111 /// \param V value to be reduced.
113 /// \return the new reduced value.
[all …]
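
The pass described above looks for expression dags whose result is only consumed through a truncation, so the whole dag can be recomputed in the narrower type. The underlying identity can be shown with plain integer masking (an informal sketch, not LLVM IR):

```python
MASK16 = 0xFFFF

def as_i16(x: int) -> int:
    """Keep only the low 16 bits, i.e. a truncation to the reduced type."""
    return x & MASK16

a, b, c = 0x12345, 0xABCDE, 0x777

# Wide evaluation followed by one final truncation ...
wide = as_i16((a + b) * c)

# ... gives the same bits as evaluating every step in the reduced type,
# because add/mul never let high bits influence the low 16 bits.
narrow = as_i16(as_i16(as_i16(a) + as_i16(b)) * as_i16(c))

assert wide == narrow
```
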
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombineInternal.h:27 // instructions and for each eligible graph, it will create a reduced bit-width
32 // 3. Can be evaluated into type with reduced legal bit-width (or Trunc type).
35 // new reduced type chosen in (3).
72 /// The reduced value generated to replace the old instruction.
104 /// Check if it is eligible to be reduced to a smaller type.
106 /// \return the scalar version of the new type to be used for the reduced
108 /// eligible to be reduced.
123 /// Given a \p V value and a \p SclTy scalar type return the generated reduced
126 /// \param V value to be reduced.
128 /// \return the new reduced value.
[all …]
/external/XNNPACK/src/math/
exp-f32-avx512f-rr2-p5-scalef.c:40 // Compute reduced argument n := round(x / log(2)). in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
44 …// For large positive or negative inputs the range reduction may produce degenerate reduced argum… in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
45 …// - Reduced argument t can fall outside of [-log(2)/2, log(2)/2] range, leading to polynomial app… in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
48 …// - Reduced argument n can overflow and become +inf or -inf, and leading to NaN in reduced argume… in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
52 // Compute reduced argument t := x - n * log(2). in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
54 …asking to explicitly zero the result for large positive inputs, to avoid propagating NaN in reduced in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
55 …// argument t into further computations. Zeroing the reduced argument t would instead result in po… in xnn_math_f32_exp__avx512f_rr2_p5_scalef()
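
The range reduction spelled out in these comments splits x into n·log(2) + t with n an integer and |t| ≤ log(2)/2, evaluates e^t on the small interval, and scales by 2^n. A plain-Python sketch of the same decomposition (it uses math.exp for the core instead of the degree-5 polynomial, and has no AVX-512 masking, so it ignores the degenerate large-input cases the comments warn about):

```python
import math

def exp_reduced(x: float) -> float:
    n = round(x / math.log(2))         # reduced argument n := round(x / log(2))
    t = x - n * math.log(2)            # reduced argument t := x - n*log(2), |t| <= log(2)/2
    return math.ldexp(math.exp(t), n)  # reconstruct e^x = 2^n * e^t

for x in (-10.0, -1.0, 0.5, 20.0):
    assert math.isclose(exp_reduced(x), math.exp(x), rel_tol=1e-12)
```
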
/external/tensorflow/tensorflow/python/distribute/
strategy_common_test.py:329 reduced = strategy.extended._replica_ctx_all_reduce(
331 return reduced
350 reduced = strategy.extended._replica_ctx_all_reduce(
352 return reduced
382 reduced = strategy.extended._replica_ctx_all_reduce(
384 return reduced
476 reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
477 return reduced
497 reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.MEAN, value)
498 return reduced
[all …]
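
For context, the public counterpart of the `_replica_ctx_all_reduce` and `rep_ctx.all_reduce` calls exercised above is `ReplicaContext.all_reduce`. A rough sketch of how such a reduction is driven (the MirroredStrategy and the constant value are placeholders, not the test's fixtures):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # any multi-replica strategy

@tf.function
def step():
    def replica_fn():
        ctx = tf.distribute.get_replica_context()
        value = tf.constant(1.0)
        # Every replica contributes `value`; each gets back the summed result.
        return ctx.all_reduce(tf.distribute.ReduceOp.SUM, value)
    return strategy.run(replica_fn)

print(step())  # per-replica results, each equal to the number of replicas
```
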
/external/llvm/test/DebugInfo/X86/
asm-macro-line-number.s:3 # 1 "reduced.S"
5 # 1 "reduced.S" 2
17 # 7 "reduced.S"
21 # CHECK: .file 2 "reduced.S"
27 # 42 "reduced.S"
/external/tensorflow/tensorflow/python/distribute/v1/
all_reduce.py:264 un_op: an optional unary operator to apply to fully reduced values.
310 list of list of `tf.Tensor` of (partially) reduced values where
311 exactly num_subchunks chunks at each device are fully reduced.
387 list of `tf.Tensor` which are the fully reduced tensors, one
434 each device has 1/n of the fully reduced values. During the
435 scatter phase each device exchanges its fully reduced
438 until each device has all of the fully reduced values.
447 input_tensors: list of `tf.Tensor` to be elementwise reduced.
449 un_op: an optional unary elementwise Op to apply to reduced values.
452 list of `tf.Tensor` which are the fully reduced tensors, one
[all …]
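
The docstring sketches the classic two-phase ring all-reduce: a reduce-scatter pass after which each device owns 1/n of the fully reduced values, then an all-gather pass that circulates those finished chunks until every device has all of them. A tensor-free sketch of that data flow (plain lists stand in for per-device tensors; the subchunking knobs of the real implementation are omitted):

```python
def ring_all_reduce(device_chunks):
    """device_chunks[d]: device d's list of n chunk values (one chunk per device).
    Returns the per-device state with every chunk fully summed on every device."""
    n = len(device_chunks)
    data = [list(c) for c in device_chunks]

    # Reduce-scatter phase: after n-1 steps, device d holds the full sum of chunk (d + 1) % n.
    for step in range(n - 1):
        sends = [(d, (d - step) % n, data[d][(d - step) % n]) for d in range(n)]
        for d, chunk, value in sends:
            data[(d + 1) % n][chunk] += value

    # All-gather phase: circulate the finished chunks around the ring until
    # each device has all of the fully reduced values.
    for step in range(n - 1):
        sends = [(d, (d + 1 - step) % n, data[d][(d + 1 - step) % n]) for d in range(n)]
        for d, chunk, value in sends:
            data[(d + 1) % n][chunk] = value

    return data

print(ring_all_reduce([[1, 2, 3], [10, 20, 30], [100, 200, 300]]))
# every device ends with [111, 222, 333]
```
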
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
utils.py:25 """Returns all-reduced gradients aggregated via summation.
31 List of (gradient, variable) pairs where gradients have been all-reduced.
38 reduced = distribute_ctx.get_strategy().extended._replica_ctx_all_reduce( # pylint: disable=prote…
42 reduced = distribute_ctx.get_replica_context().merge_call(
45 reduced = []
46 # Copy 'reduced' but add None gradients back in
53 reduced_with_nones.append((reduced[reduced_pos], v))
55 assert reduced_pos == len(reduced), "Failed to add all gradients"
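
The bookkeeping around `reduced_with_nones` filters out `None` gradients before the all-reduce and splices them back afterwards so the result lines up with the original (gradient, variable) pairs. A strategy-free sketch of that splice (an identity pass stands in for the cross-replica summation):

```python
def all_reduce_sum_gradients(grads_and_vars):
    """grads_and_vars: list of (gradient-or-None, variable) pairs."""
    filtered = [(g, v) for g, v in grads_and_vars if g is not None]

    # Stand-in for the cross-replica all-reduce of the filtered gradients.
    reduced = [g for g, _ in filtered]   # in TF this would be summed across replicas

    # Copy 'reduced' but add None gradients back in their original slots.
    reduced_with_nones = []
    reduced_pos = 0
    for g, v in grads_and_vars:
        if g is None:
            reduced_with_nones.append((None, v))
        else:
            reduced_with_nones.append((reduced[reduced_pos], v))
            reduced_pos += 1
    assert reduced_pos == len(reduced), "Failed to add all gradients"
    return reduced_with_nones

print(all_reduce_sum_gradients([(1.0, "w1"), (None, "w2"), (2.5, "w3")]))
# [(1.0, 'w1'), (None, 'w2'), (2.5, 'w3')]
```
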
/external/libxaac/encoder/drc_src/
impd_drc_uni_drc.h:49 #define GAIN_SET_COUNT_MAX 8 /* reduced size */
70 #define DOWNMIX_INSTRUCTIONS_COUNT_MAX 8 /* reduced size */
71 #define DRC_COEFFICIENTS_UNIDRC_V1_COUNT_MAX 2 /* reduced size */
72 #define DRC_INSTRUCTIONS_UNIDRC_V1_COUNT_MAX 8 /* reduced size */
73 #define SPLIT_CHARACTERISTIC_COUNT_MAX 8 /* reduced size */
74 #define SHAPE_FILTER_COUNT_MAX 8 /* reduced size */
80 #define FILTER_ELEMENT_COUNT_MAX 16 /* reduced size */
89 #define UNIQUE_SUBBAND_GAIN_COUNT_MAX 16 /* reduced size */
91 #define FILTER_ELEMENT_COUNT_MAX 16 /* reduced size */
92 #define UNIQUE_SUBBAND_GAINS_COUNT_MAX 8 /* reduced size */
[all …]
/external/python/cpython3/Lib/lib2to3/
btm_utils.py:129 reduced = reduce_tree(child, new_node)
130 if reduced is not None:
131 new_node.children.append(reduced)
137 reduced = reduce_tree(child, new_node)
138 if reduced:
139 new_node.children.append(reduced)
141 # delete the group if all of the children were reduced to None
228 reduced = reduce_tree(child, new_node)
229 if reduced is not None:
230 new_node.children.append(reduced)
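
The pattern in `reduce_tree` is a recursive fold: each child is reduced in turn, children that reduce to `None` are dropped, and a group whose children all reduced to `None` is itself deleted. A generic sketch of that shape (dict nodes here are hypothetical, not the lib2to3 node types):

```python
def reduce_tree(node, parent=None):
    """Keep a node only if something under it survives reduction."""
    new_node = {"value": node["value"], "children": [], "parent": parent}
    for child in node.get("children", []):
        reduced = reduce_tree(child, new_node)
        if reduced is not None:
            new_node["children"].append(reduced)
    # Delete the group if all of the children were reduced to None.
    if node.get("children") and not new_node["children"]:
        return None
    return new_node
```
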
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_SparseReduceMax.pbtxt:31 `R-K`-D. The reduced Tensor.
37 If true, retain reduced dimensions with length 1.
47 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
48 `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
51 If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
api_def_SparseReduceSum.pbtxt:31 `R-K`-D. The reduced Tensor.
37 If true, retain reduced dimensions with length 1.
47 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
48 `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
51 If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
api_def_Prod.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_All.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_Mean.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_EuclideanNorm.pbtxt:23 The reduced tensor.
29 If true, retain reduced dimensions with length 1.
35 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
36 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_Max.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_Any.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_Min.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
api_def_Sum.pbtxt:26 The reduced tensor.
32 If true, retain reduced dimensions with length 1.
38 `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39 `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
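
All of these api_def entries describe the same `keep_dims` contract: each reduced axis either disappears from the output shape or is retained with length 1. The effect is easy to see on a small array (NumPy shown for brevity; the TensorFlow reduction ops behave the same way):

```python
import numpy as np

x = np.arange(24.0).reshape(2, 3, 4)

print(x.sum(axis=1).shape)                 # (2, 4)    rank drops by 1 per reduced axis
print(x.sum(axis=1, keepdims=True).shape)  # (2, 1, 4) reduced dimension kept with length 1
print(x.sum().shape)                       # ()        no axes given: everything is reduced
```
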
/external/python/cpython2/Lib/lib2to3/
btm_utils.py:130 reduced = reduce_tree(child, new_node)
131 if reduced is not None:
132 new_node.children.append(reduced)
138 reduced = reduce_tree(child, new_node)
139 if reduced:
140 new_node.children.append(reduced)
142 # delete the group if all of the children were reduced to None
230 reduced = reduce_tree(child, new_node)
231 if reduced is not None:
232 new_node.children.append(reduced)
/external/tensorflow/tensorflow/cc/gradients/
linalg_grad.cc:112 // Returns reduced subscripts and their corresponding dimensions and axes.
117 // from `reduced_label_set` in any order. For example, for the reduced label
138 // Concatenate the sequence of reduced axis labels. in EinsumGetReducedSubscripts()
141 // Get the axis (may be positive, negative or zero) for each of the reduced in EinsumGetReducedSubscripts()
155 // Get the corresponding dimensions for each reduced axis. in EinsumGetReducedSubscripts()
190 // 'd' are reduced with input_shape [2,2,5,5,3,4]. Then obtain the reduced in EinsumGradReducedHelper()
206 // Compute the input subscripts without the reduced axis labels, e.g. "aac" in EinsumGradReducedHelper()
219 // If we're not dealing with repeated labels, and the non-reduced labels in EinsumGradReducedHelper()
228 // reduced shape [2,1,3,1]. in EinsumGradReducedHelper()
237 // reduced dimensions to the front. E.g. Given the equation "aabbcd->ca" we'd in EinsumGradReducedHelper()
[all …]
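
The helper above recovers the dimensions and axes of labels that were summed out of the einsum (they appear in an input subscript but not in the output), because the gradient has to be broadcast back across exactly those reduced axes. A small NumPy illustration of which labels count as reduced and how the gradient regains their shape:

```python
import numpy as np

x = np.random.rand(2, 3, 4)        # input subscripts "abc"
y = np.einsum("abc->a", x)         # labels "b" and "c" are the reduced label set

# Gradient of a plain sum over the reduced axes: broadcast the upstream
# gradient back over the reduced dimensions (3 and 4) taken from x's shape.
dy = np.ones_like(y)               # upstream gradient, shape (2,)
dx = np.broadcast_to(dy[:, None, None], x.shape)

assert dx.shape == x.shape
```
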
/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_custom_gemv.h:361 // will need to be horizontally reduced at the end.
544 int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
545 // End of horizontal reduction: now `reduced` is a single int32x4
552 reduced = vaddq_s32(reduced, bias_vec);
572 reduced = vshlq_s32(reduced, exponent_positive_part);
574 reduced = vqrdmulhq_s32(reduced, multiplier_fixedpoint);
578 reduced = vrshlq_s32(reduced, exponent_negative_part);
582 reduced = vaddq_s32(reduced, output_offset_vec);
585 ClampAndStore(reduced, params.clamp_min, params.clamp_max,
673 // will need to be horizontally reduced at the end.
[all …]
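
The tail of that kernel, after the horizontal reduction into `reduced`, is a standard integer requantization: add the bias, apply the fixed-point multiplier and exponent shifts, add the output zero point, and clamp to the output range. A scalar sketch of those per-lane steps (the multiplier is a float here for clarity, whereas the NEON code uses a Q31 multiplier with saturating, rounding shifts):

```python
def requantize(acc: int, bias: int, multiplier: float, exponent: int,
               output_offset: int, clamp_min: int, clamp_max: int) -> int:
    """Scalar stand-in for the per-lane work done on `reduced` above."""
    acc += bias                                      # vaddq_s32(reduced, bias_vec)
    acc = round(acc * multiplier * 2.0 ** exponent)  # fixed-point multiply + shifts, simplified
    acc += output_offset                             # add the output zero point
    return max(clamp_min, min(clamp_max, acc))       # ClampAndStore

print(requantize(acc=12345, bias=7, multiplier=0.0005, exponent=0,
                 output_offset=-128, clamp_min=-128, clamp_max=127))  # -122
```
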
