/external/apache-commons-math/src/main/java/org/apache/commons/math/util/
  MathUtils.java
    1538  private static double roundUnscaled(double unscaled, double sign,
    1543      unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY));
    1545      unscaled = FastMath.ceil(nextAfter(unscaled, Double.POSITIVE_INFINITY));
    1549      unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY));
    1553      unscaled = FastMath.ceil(nextAfter(unscaled, Double.POSITIVE_INFINITY));
    1555      unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY));
    1559      unscaled = nextAfter(unscaled, Double.NEGATIVE_INFINITY);
    1560      double fraction = unscaled - FastMath.floor(unscaled);
    1562      unscaled = FastMath.ceil(unscaled);
    1564      unscaled = FastMath.floor(unscaled);
    [all …]
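The MathUtils fragments all use one idiom: each rounding branch first steps the value one ulp toward the rounding direction with nextAfter, then truncates with floor or ceil, so that a value sitting one ulp past a boundary (representation error from the preceding power-of-ten scaling) still truncates consistently. A minimal C sketch of that nudge-then-truncate expression, with hypothetical helper names and math.h's nextafter standing in for FastMath.nextAfter:

    #include <math.h>
    #include <stdio.h>

    /* Nudge one ulp toward the rounding direction, then truncate.
     * Helper names are hypothetical; this sketches the expression shape,
     * not the Commons Math implementation. */
    static double floor_nudged(double x) { return floor(nextafter(x, -INFINITY)); }
    static double ceil_nudged(double x)  { return ceil(nextafter(x, INFINITY)); }

    int main(void) {
      printf("%g\n", floor_nudged(2.5)); /* 2: interior values truncate as usual */
      printf("%g\n", ceil_nudged(2.5));  /* 3 */
      /* The one-ulp nudge pushes an exactly integral input past the
       * boundary before truncation: */
      printf("%g\n", floor_nudged(2.0)); /* 1 */
      printf("%g\n", ceil_nudged(2.0));  /* 3 */
      return 0;
    }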
/external/eigen/bench/
  benchFFT.cpp
    44  void bench(int nfft,bool fwd,bool unscaled=false, bool halfspec=false)  [in bench(); argument]
    53      if (unscaled) {  [in bench()]
/external/apache-commons-math/src/main/java/org/apache/commons/math/transform/
  FastHadamardTransformer.java
    60      final double[] unscaled =  [in inversetransform(); local]
    62      return FastFourierTransformer.scaleArray(unscaled, 1.0 / n);  [in inversetransform()]
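The two FastHadamardTransformer lines capture the entire inverse transform: the Sylvester-construction Hadamard matrix H_n is symmetric and satisfies H_n H_n = n I_n, so inverting is just running the forward (unscaled) transform again and dividing by n. In LaTeX notation:

    H_n^{-1} = \frac{1}{n} H_n, \qquad \text{inversetransform}(y) = \frac{1}{n} H_n y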
/external/llvm/test/CodeGen/AArch64/
  arm64-scaled_iv.ll
    3  ; Make loop-reduce prefer unscaled accesses.
  fast-isel-addressing-modes.ll
    142  ; Max supported unscaled offset
    152  ; Min un-supported unscaled offset
    208  ; Max supported unscaled offset
    218  ; Min un-supported unscaled offset
  arm64-scvt.ll
    67  ; 5. load with unscaled imm to float.
    68  ; 6. load with unscaled imm to double.
    286  ; ********* 5. load with unscaled imm to float. *********
    346  ; ********* 6. load with unscaled imm to double. *********
    647  ; ********* 5s. load with unscaled imm to float. *********
    714  ; ********* 6s. load with unscaled imm to double. *********
  ldp-stp-scaled-unscaled-pairs.ll
    105  ; Pair an unscaled store with a scaled store where the scaled store has a
  arm64-vector-ldst.ll
    266  ; registers for unscaled vector accesses
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
  arm64-scaled_iv.ll
    3  ; Make loop-reduce prefer unscaled accesses.
  fast-isel-addressing-modes.ll
    142  ; Max supported unscaled offset
    152  ; Min un-supported unscaled offset
    208  ; Max supported unscaled offset
    218  ; Min un-supported unscaled offset
  arm64-scvt.ll
    67  ; 5. load with unscaled imm to float.
    68  ; 6. load with unscaled imm to double.
    286  ; ********* 5. load with unscaled imm to float. *********
    346  ; ********* 6. load with unscaled imm to double. *********
    647  ; ********* 5s. load with unscaled imm to float. *********
    714  ; ********* 6s. load with unscaled imm to double. *********
  ldp-stp-scaled-unscaled-pairs.ll
    105  ; Pair an unscaled store with a scaled store where the scaled store has a
  arm64-vector-ldst.ll
    266  ; registers for unscaled vector accesses
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
  avoid_complex_am.ll
    3  ; Make loop-reduce prefer unscaled accesses.
/external/libaom/libaom/av1/common/
  resize.h
    78      YV12_BUFFER_CONFIG *unscaled,
  resize.c
    1285      YV12_BUFFER_CONFIG *unscaled,  [in av1_scale_if_required(); argument]
    1288      if (cm->width != unscaled->y_crop_width ||  [in av1_scale_if_required()]
    1289          cm->height != unscaled->y_crop_height) {  [in av1_scale_if_required()]
    1290        av1_resize_and_extend_frame(unscaled, scaled, (int)cm->seq_params.bit_depth,  [in av1_scale_if_required()]
    1294      return unscaled;  [in av1_scale_if_required()]
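The resize.c fragments show the scale-if-required pattern: compare the coding size in cm against the source buffer's crop size, rescale into a caller-provided scratch buffer only on a mismatch, and otherwise return the original buffer untouched. A minimal self-contained C sketch of that shape, with hypothetical single-plane frame and context types standing in for YV12_BUFFER_CONFIG and the codec's common state, and a nearest-neighbour stub in place of av1_resize_and_extend_frame:

    #include <stdio.h>

    /* Hypothetical single-plane frame, standing in for YV12_BUFFER_CONFIG. */
    typedef struct {
      int w, h;
      unsigned char px[64 * 64]; /* fixed backing store, sized for the demo */
    } frame_buf;

    typedef struct { int width, height; } codec_common; /* stands in for AV1_COMMON */

    /* Nearest-neighbour resample, a stub for av1_resize_and_extend_frame. */
    static void resize_frame(const frame_buf *src, frame_buf *dst, int w, int h) {
      dst->w = w;
      dst->h = h;
      for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
          dst->px[y * w + x] =
              src->px[(y * src->h / h) * src->w + (x * src->w / w)];
    }

    /* The scale-if-required shape: rescale into the caller's scratch buffer
     * only when the coding size differs from the source, else return the
     * source untouched (no copy on the common path). */
    static frame_buf *scale_if_required(const codec_common *cm,
                                        frame_buf *unscaled, frame_buf *scaled) {
      if (cm->width != unscaled->w || cm->height != unscaled->h) {
        resize_frame(unscaled, scaled, cm->width, cm->height);
        return scaled;
      }
      return unscaled;
    }

    int main(void) {
      static frame_buf src = { 4, 4, { 0 } }, scratch;
      codec_common cm = { 2, 2 };
      frame_buf *out = scale_if_required(&cm, &src, &scratch);
      printf("%s, %dx%d\n", out == &src ? "unscaled" : "scaled", out->w, out->h);
      return 0;
    }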
/external/llvm/test/CodeGen/X86/
  avoid_complex_am.ll
    3  ; Make loop-reduce prefer unscaled accesses.
/external/libvpx/libvpx/vp9/encoder/
  vp9_encoder.c
    4386      VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,  [argument]
    4389      if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
    4390          cm->mi_rows * MI_SIZE != unscaled->y_height) {
    4393        vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
    4398        scale_and_extend_frame(unscaled, scaled_temp, (int)cm->bit_depth,
    4404        vp9_scale_and_extend_frame(unscaled, scaled_temp, filter_type2,
    4410      return unscaled;
    4415      VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,  [argument]
    4417      if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
    4418          cm->mi_rows * MI_SIZE != unscaled->y_height) {
    [all …]
  vp9_encoder.h
    1002      VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
    1007      VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
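The vp9_encoder.c matches follow the same scale-if-required shape sketched above for libaom, with two differences visible even in these fragments: the target size is derived from the mode-info grid (mi_cols and mi_rows in MI_SIZE units) rather than from stored width/height fields, and a second filter type (filter_type2) is threaded through to vp9_scale_and_extend_frame for the rescaling pass.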
/external/vixl/doc/
  changelog.md
    73  + Allow explicit use of unscaled-offset loads and stores.
/external/vixl/doc/aarch64/
  supported-instructions-aarch64.md
    655  Load integer or FP register (with unscaled offset).
    664  Load byte (with unscaled offset).
    673  Load half-word (with unscaled offset).
    682  Load byte with sign extension (and unscaled offset).
    691  Load half-word with sign extension (and unscaled offset).
    920  Prefetch memory (with unscaled offset).
    1160  Store integer or FP register (with unscaled offset).
    1169  Store byte (with unscaled offset).
    1178  Store half-word (with unscaled offset).
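For context on the "unscaled offset" wording that recurs across these AArch64 entries: the plain LDR/STR immediate encodings take an unsigned 12-bit offset that is implicitly scaled by the access size, whereas the LDUR/STUR family described in the vixl list above (PRFUM for the prefetch) takes a signed 9-bit byte offset, -256 to +255, with no scaling. That unscaled range is presumably what the "Max supported unscaled offset" / "Min un-supported unscaled offset" fast-isel tests earlier in this list are probing.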
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AArch64/
  AArch64SVEInstrInfo.td
    406  // Gathers using unscaled 32-bit offsets, e.g.
    458  // Gathers using unscaled 64-bit offsets, e.g.
    488  // Gathers using unscaled 32-bit offsets unpacked in 64-bits elements, e.g.
    554  // Scatters using unscaled 32-bit offsets, e.g.
    587  // Scatters using unscaled 64-bit offsets, e.g.
  AArch64SchedThunderX2T99.td
    591  // Load register, unscaled immed
    860  // Store register, unscaled immed
/external/llvm/lib/Target/AArch64/
  AArch64SchedVulcan.td
    303  // Load register, unscaled immed
    350  // Store register, unscaled immed
  AArch64InstrInfo.td
    1693  // (unscaled immediate)
    1776  // unscaled zext
    1897  // (unscaled immediate, unprivileged)
    2201  // (unscaled immediate)
    2283  // unscaled i64 truncating stores
    2314  // (unscaled immediate, unprivileged)