/external/boringssl/src/util/ |
D | compare_benchmarks.go |
    44 func printResult(result Result, baseline *Result) error {
    45 if baseline != nil {
    46 if result.Description != baseline.Description {
    47 …return fmt.Errorf("result did not match baseline: %q vs %q", result.Description, baseline.Descript…
    50 if result.BytesPerCall != baseline.BytesPerCall {
    51 … did not match baseline: %d vs %d", result.Description, result.BytesPerCall, baseline.BytesPerCall)
    57 if baseline != nil {
    58 oldSpeed, _ := baseline.Speed()
    80 baseline, err := readResults(*baselineFile)
    87 for _, result := range baseline {
    [all …]
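The Go tool refuses to compare results whose descriptions or bytes-per-call differ, and otherwise reports the speed change against the baseline. A minimal Python sketch of that comparison (field and function names below are assumptions, not the BoringSSL tool's API):

```python
# Minimal sketch of comparing one benchmark result against its baseline.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:
    description: str
    bytes_per_call: int
    speed: float  # e.g. operations per second

def print_result(result: Result, baseline: Optional[Result]) -> None:
    if baseline is not None:
        if result.description != baseline.description:
            raise ValueError("result did not match baseline: "
                             f"{result.description!r} vs {baseline.description!r}")
        if result.bytes_per_call != baseline.bytes_per_call:
            raise ValueError("bytes/call did not match baseline: "
                             f"{result.bytes_per_call} vs {baseline.bytes_per_call}")
        change = (result.speed - baseline.speed) / baseline.speed * 100
        print(f"{result.description}: {result.speed:.1f} ({change:+.1f}% vs baseline)")
    else:
        print(f"{result.description}: {result.speed:.1f}")
```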
|
/external/caliper/caliper/src/main/java/com/google/caliper/worker/ |
D | AllocationStats.java |
    84 AllocationStats minus(AllocationStats baseline) { in minus() argument
    85 for (Entry<Allocation> entry : baseline.allocations.entrySet()) { in minus()
    98 return new AllocationStats(allocationCount - baseline.allocationCount, in minus()
    99 allocationSize - baseline.allocationSize, in minus()
    100 reps - baseline.reps, in minus()
    101 Multisets.difference(allocations, baseline.allocations)); in minus()
    107 baseline, this), e); in minus()
    116 public Delta delta(AllocationStats baseline) { in delta() argument
    118 allocationCount - baseline.allocationCount, in delta()
    119 allocationSize - baseline.allocationSize, in delta()
    [all …]
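Caliper subtracts a baseline measurement (the allocation overhead of the harness itself) from the real measurement, using a multiset difference for the per-site allocations. A rough Python equivalent, with `collections.Counter` standing in for Guava's multiset (class and field names are illustrative, not Caliper's API):

```python
from collections import Counter
from dataclasses import dataclass, field

@dataclass
class AllocationStats:
    allocation_count: int
    allocation_size: int
    reps: int
    allocations: Counter = field(default_factory=Counter)  # allocation site -> count

    def minus(self, baseline: "AllocationStats") -> "AllocationStats":
        # The baseline must not have seen allocations the measurement lacks.
        for site, count in baseline.allocations.items():
            if self.allocations[site] < count:
                raise ValueError(f"baseline allocated more than the measurement at {site!r}")
        return AllocationStats(
            self.allocation_count - baseline.allocation_count,
            self.allocation_size - baseline.allocation_size,
            self.reps - baseline.reps,
            self.allocations - baseline.allocations,  # multiset difference
        )
```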
|
D | MicrobenchmarkAllocationWorker.java |
    91 AllocationStats baseline = null; in verifyBenchmarkIsDeterministic() local
    96 if (stats.equals(baseline)) { in verifyBenchmarkIsDeterministic()
    104 baseline = stats; in verifyBenchmarkIsDeterministic()
    128 AllocationStats baseline = measureAllocations(benchmark, benchmarkMethod, 0); in measure() local
    132 return measurement.minus(baseline).toMeasurements(); in measure()
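The worker only trusts the benchmark once two consecutive allocation measurements come out identical; the matching measurement then serves as the baseline that later gets subtracted. A hedged sketch of that loop (`measure_allocations` is a hypothetical callable):

```python
def verify_deterministic(measure_allocations, max_attempts: int = 10):
    """Measure until two consecutive runs allocate identically; return that baseline."""
    baseline = None
    for _ in range(max_attempts):
        stats = measure_allocations()
        if stats == baseline:      # two identical consecutive measurements
            return baseline
        baseline = stats           # latest run becomes the candidate baseline
    raise RuntimeError("benchmark allocations appear non-deterministic")
```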
|
/external/pdfium/testing/image_diff/ |
D | image_diff.cpp |
    131 void CountImageSizeMismatchAsPixelDifference(const Image& baseline, in CountImageSizeMismatchAsPixelDifference() argument
    134 int w = std::min(baseline.w(), actual.w()); in CountImageSizeMismatchAsPixelDifference()
    135 int h = std::min(baseline.h(), actual.h()); in CountImageSizeMismatchAsPixelDifference()
    138 int max_w = std::max(baseline.w(), actual.w()); in CountImageSizeMismatchAsPixelDifference()
    139 int max_h = std::max(baseline.h(), actual.h()); in CountImageSizeMismatchAsPixelDifference()
    146 float PercentageDifferent(const Image& baseline, const Image& actual) { in PercentageDifferent() argument
    147 int w = std::min(baseline.w(), actual.w()); in PercentageDifferent()
    148 int h = std::min(baseline.h(), actual.h()); in PercentageDifferent()
    154 if (baseline.pixel_at(x, y) != actual.pixel_at(x, y)) in PercentageDifferent()
    159 CountImageSizeMismatchAsPixelDifference(baseline, actual, &pixels_different); in PercentageDifferent()
    [all …]
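The diff walks the region the two images have in common pixel by pixel, then counts the entire area outside that overlap (present in only one image) as differing pixels. A small Python sketch of the same accounting, with images as 2-D lists rather than pdfium's Image class:

```python
def percentage_different(baseline, actual):
    """baseline/actual: 2-D lists of pixel values, indexed as img[y][x]."""
    bh = len(baseline)
    bw = len(baseline[0]) if baseline else 0
    ah = len(actual)
    aw = len(actual[0]) if actual else 0
    w, h = min(bw, aw), min(bh, ah)

    # Pixels that differ inside the overlapping region.
    different = sum(1 for y in range(h) for x in range(w)
                    if baseline[y][x] != actual[y][x])

    # A size mismatch means some pixels exist in only one image;
    # count all of them as differences.
    max_w, max_h = max(bw, aw), max(bh, ah)
    different += max_w * max_h - w * h

    total = max_w * max_h
    return 100.0 * different / total if total else 0.0
```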
|
/external/tensorflow/tensorflow/python/estimator/canned/ |
D | baseline.py |
    26 from tensorflow_estimator.python.estimator.canned import baseline
    30 baseline.__all__ = [s for s in dir(baseline) if not s.startswith('__')]
    32 from tensorflow_estimator.python.estimator.canned.baseline import *
|
/external/rust/crates/plotters/src/series/ |
D | histogram.rs |
    29 baseline: Box<dyn Fn(&BR::ValueType) -> A + 'a>, field
    45 baseline: Box::new(|_| A::default()), in empty()
    67 pub fn baseline(mut self, baseline: A) -> Self in baseline() function
    71 self.baseline = Box::new(move |_| baseline.clone()); in baseline()
    77 self.baseline = Box::new(func); in baseline_func()
    149 let base = (self.baseline)(&x); in next()
    173 let base = (self.baseline)(&y); in next()
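plotters keeps the histogram baseline as a boxed closure, so `baseline()` can wrap a constant value and `baseline_func()` can install a per-bucket function; the closure is evaluated for each bar when drawing. The same builder idea sketched in Python (names are hypothetical, not the plotters API):

```python
class Histogram:
    """Sketch of a histogram builder that stores its baseline as a callable."""

    def __init__(self):
        self._baseline = lambda coord: 0      # default: zero baseline everywhere

    def baseline(self, value):
        """Constant baseline: wrap the value in a closure."""
        self._baseline = lambda coord: value
        return self

    def baseline_func(self, func):
        """Per-bucket baseline computed from the bucket coordinate."""
        self._baseline = func
        return self

    def bar_bounds(self, coord, height):
        base = self._baseline(coord)          # evaluated per bucket when drawing
        return base, base + height

# Histogram().baseline(10)                   -> every bar starts at 10
# Histogram().baseline_func(lambda x: x / 2) -> bar at x starts at x / 2
```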
|
D | area_series.rs |
    10 baseline: Y, field
    19 baseline: Y, in new()
    24 baseline, in new()
    45 data.push((data[data.len() - 1].0.clone(), self.baseline.clone())); in next()
    46 data.push((data[0].0.clone(), self.baseline.clone())); in next()
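An area series closes its fill polygon by appending two extra vertices: the last x at the baseline and the first x at the baseline. A sketch of that closing step with plain coordinate tuples (not plotters types):

```python
def close_area_polygon(points, baseline):
    """points: [(x, y), ...] along the curve, in drawing order."""
    if not points:
        return []
    polygon = list(points)
    polygon.append((points[-1][0], baseline))  # drop to the baseline at the last x
    polygon.append((points[0][0], baseline))   # walk back to the first x at the baseline
    return polygon  # the fill routine closes it back to the first point

# close_area_polygon([(0, 3), (1, 5), (2, 4)], baseline=0)
# -> [(0, 3), (1, 5), (2, 4), (2, 0), (0, 0)]
```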
|
/external/llvm-project/compiler-rt/test/fuzzer/ |
D | keep-seed.test |
    11 RUN: rm -rf %t-corpus-baseline
    12 RUN: mkdir %t-corpus-baseline
    13 RUN: echo -n SELECTxFROMxWHERE > %t-corpus-baseline/valid-fragments
    17 RUN: %run %t-KeepSeedTest -seed=1 -runs=4000000 %t-corpus-baseline -print_final_stats=1
|
/external/rust/crates/crc32fast/src/specialized/ |
D | aarch64.rs |
    72 let mut baseline = super::super::super::baseline::State::new(init); localVariable
    78 baseline.update(&chunk);
    81 baseline.update(&chunk[offset..]);
    85 aarch64.finalize() == baseline.finalize()
|
D | pclmulqdq.rs |
    97 return ::baseline::update_fast_16(crc, data); in calculate()
    186 ::baseline::update_fast_16(!c, data) in calculate()
    209 let mut baseline = super::super::super::baseline::State::new(init); localVariable
    215 baseline.update(&chunk);
    218 baseline.update(&chunk[offset..]);
    222 pclmulqdq.finalize() == baseline.finalize()
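Both SIMD backends in crc32fast are checked against the portable baseline implementation: feed identical chunks to both states and require the finalized CRCs to match. The same idea in Python, treating `zlib.crc32` as the accelerated path and a hand-written bitwise CRC-32 as the baseline (the bitwise routine is written out purely for illustration):

```python
import os
import zlib

def crc32_bitwise(data: bytes, crc: int = 0) -> int:
    """Straightforward per-bit CRC-32 (IEEE), used as the baseline."""
    crc ^= 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0xEDB88320 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF

def check_against_baseline(chunks) -> bool:
    fast = 0
    slow = 0
    for chunk in chunks:
        fast = zlib.crc32(chunk, fast)       # "accelerated" implementation
        slow = crc32_bitwise(chunk, slow)    # baseline implementation
    return fast == slow

assert check_against_baseline(os.urandom(64) for _ in range(16))
```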
|
/external/skia/samplecode/ |
D | SampleGlyphTransform.cpp |
    41 double baseline = this->height() / 2; in onDrawContent() local
    42 canvas->drawLine(0, baseline, this->width(), baseline, paint); in onDrawContent()
    47 ctm.postTranslate(fTranslate.fX + this->width() * 0.8, fTranslate.fY + baseline); in onDrawContent()
|
/external/kotlinx.coroutines/kotlinx-coroutines-core/common/test/ |
D | EmptyContext.kt |
    11 val baseline = Result.failure<T>(IllegalStateException("Block was suspended")) in withEmptyContext() constant
    12 var result: Result<T> = baseline in withEmptyContext()
    14 while (result == baseline) yield() in withEmptyContext()
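The Kotlin helper seeds `result` with a sentinel baseline value and keeps yielding until the block overwrites it, which is how it detects that the block actually suspended. A loose Python/asyncio analogue of that sentinel trick (not the kotlinx.coroutines API):

```python
import asyncio

async def with_empty_context(block):
    """Run `block`, using a sentinel to detect when it has produced a result."""
    baseline = object()        # sentinel: "no result yet"
    result = baseline

    async def runner():
        nonlocal result
        result = await block()

    task = asyncio.ensure_future(runner())
    while result is baseline:  # keep yielding to the event loop until overwritten
        await asyncio.sleep(0)
    await task
    return result

# asyncio.run(with_empty_context(lambda: asyncio.sleep(0, result=42)))  # -> 42
```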
|
/external/skqp/samplecode/ |
D | SampleGlyphTransform.cpp |
    47 double baseline = this->height() / 2; in onDrawContent() local
    48 canvas->drawLine(0, baseline, this->width(), baseline, paint); in onDrawContent()
    53 ctm.postTranslate(fTranslate.fX + this->width() * 0.8, fTranslate.fY + baseline); in onDrawContent()
|
/external/rust/crates/grpcio-sys/grpc/third_party/upb/benchmarks/ |
D | compare.py |
    60 baseline = "master" variable
    65 baseline = sys.argv[1] variable
    68 with GitWorktree(baseline):
    75 with GitWorktree(baseline):
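The upb script benchmarks the baseline ref (`master` by default, or `sys.argv[1]`) by checking it out into a temporary git worktree, then compares against the current tree. A hedged sketch of such a worktree context manager (the real script's `GitWorktree` helper may differ):

```python
import contextlib
import os
import shutil
import subprocess
import tempfile

@contextlib.contextmanager
def git_worktree(ref: str):
    """Check `ref` out into a throwaway worktree and clean it up afterwards."""
    parent = tempfile.mkdtemp(prefix="baseline-")
    path = os.path.join(parent, "worktree")
    subprocess.check_call(["git", "worktree", "add", "--detach", path, ref])
    try:
        yield path
    finally:
        subprocess.check_call(["git", "worktree", "remove", "--force", path])
        shutil.rmtree(parent, ignore_errors=True)

# Hypothetical usage:
# with git_worktree("master") as path:
#     baseline_numbers = build_and_run_benchmarks(path)
```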
|
/external/skia/tools/calmbench/ |
D | calmbench.py |
    159 ['cp', args.ninjadir + '/nanobench', nano_path(args, args.baseline)],
    168 compile_branch(args, args.baseline)
    186 args.branch + ("_A" if args.branch == args.baseline else ""),
    187 args.baseline + ("_B" if args.branch == args.baseline else ""),
    189 nano_path(args, args.baseline),
|
/external/skqp/tools/calmbench/ |
D | calmbench.py |
    157 ['cp', args.ninjadir + '/nanobench', nano_path(args, args.baseline)],
    166 compile_branch(args, args.baseline)
    184 args.branch + ("_A" if args.branch == args.baseline else ""),
    185 args.baseline + ("_B" if args.branch == args.baseline else ""),
    187 nano_path(args, args.baseline),
|
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/ |
D | hashing_benchmark.py |
    97 baseline = self.run_dataset_implementation(batch_size)
    99 "dataset implementation baseline": baseline,
    100 "delta seconds": (baseline - avg_time),
    101 "delta percent": ((baseline - avg_time) / baseline) * 100
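This benchmark and the three siblings listed below all report the same two derived numbers: the absolute time saved relative to the baseline implementation, and that saving as a percentage of the baseline. As a formula-level sketch:

```python
def compare_to_baseline(baseline_seconds: float, avg_seconds: float) -> dict:
    """Positive deltas mean the measured implementation beat the baseline."""
    delta = baseline_seconds - avg_seconds
    return {
        "baseline seconds": baseline_seconds,
        "delta seconds": delta,
        "delta percent": (delta / baseline_seconds) * 100,
    }

# compare_to_baseline(2.0, 1.5)
# -> {'baseline seconds': 2.0, 'delta seconds': 0.5, 'delta percent': 25.0}
```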
|
D | discretization_adapt_benchmark.py |
    102 baseline = self.run_dataset_implementation(num_elements, batch_size)
    104 "tf.data implementation baseline": baseline,
    105 "delta seconds": (baseline - avg_time),
    106 "delta percent": ((baseline - avg_time) / baseline) * 100
|
D | category_crossing_benchmark.py |
    98 baseline = self.run_dataset_implementation(batch_size)
    100 "dataset implementation baseline": baseline,
    101 "delta seconds": (baseline - avg_time),
    102 "delta percent": ((baseline - avg_time) / baseline) * 100
|
D | index_lookup_adapt_benchmark.py |
    113 baseline = self.run_numpy_implementation(num_elements, batch_size, k)
    115 "numpy implementation baseline": baseline,
    116 "delta seconds": (baseline - avg_time),
    117 "delta percent": ((baseline - avg_time) / baseline) * 100
|
/external/eigen/bench/btl/generic_bench/timers/ |
D | STL_timer.hh |
    32 STL_Timer(){ baseline = false; }; // Default constructor in STL_Timer()
    44 baseline = true; in start_baseline()
    74 bool baseline; member in STL_Timer
|
/external/skia/modules/skparagraph/src/ |
D | Decorations.cpp |
    23 …vas* canvas, const TextStyle& textStyle, const TextLine::ClipContext& context, SkScalar baseline) { in paint() argument
    59 … calculateGaps(context, SkRect::MakeXYWH(left, y, width, fThickness), baseline, fThickness); in paint()
    61 … calculateGaps(context, SkRect::MakeXYWH(left, bottom, width, fThickness), baseline, fThickness); in paint()
    74 calculateGaps(context, SkRect::MakeXYWH(left, y, width, fThickness), baseline, 0); in paint()
    84 … calculateGaps(context, SkRect::MakeXYWH(left, y, width, fThickness), baseline, fThickness); in paint()
    96 SkScalar baseline, SkScalar halo) { in calculateGaps() argument
    111 const SkScalar bounds[2] = {rect.fTop - baseline, rect.fBottom - baseline}; in calculateGaps()
|
/external/rust/crates/criterion/src/analysis/ |
D | mod.rs |
    50 if let Baseline::Compare = criterion.baseline { in common()
    64 if let Some(baseline) = &criterion.load_baseline { in common()
    67 sample_path.push(baseline); in common()
    74 base = baseline, err = err in common()
    251 if let Baseline::Save = criterion.baseline { in common()
    261 fn base_dir_exists(id: &BenchmarkId, baseline: &str, output_directory: &Path) -> bool { in base_dir_exists()
    264 base_dir.push(baseline); in base_dir_exists()
    339 fn copy_new_dir_to_base(id: &str, baseline: &str, output_directory: &Path) { in copy_new_dir_to_base()
    341 let base_dir = root_dir.join(baseline); in copy_new_dir_to_base()
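criterion either loads previously saved samples to compare against (`Baseline::Compare` / `load_baseline`) or promotes the run that just finished to be the named baseline (`Baseline::Save`, done by copying the fresh `new` directory over the baseline directory). A rough directory-level sketch of that save/compare flow in Python (layout and file names are assumptions, not criterion's on-disk format):

```python
import json
import shutil
from pathlib import Path

def load_baseline_sample(output_dir: Path, bench_id: str, baseline: str):
    """Compare mode: fail loudly if the named baseline was never saved."""
    sample_path = output_dir / bench_id / baseline / "sample.json"
    if not sample_path.exists():
        raise FileNotFoundError(f"no saved baseline {baseline!r} for {bench_id!r}")
    return json.loads(sample_path.read_text())

def save_as_baseline(output_dir: Path, bench_id: str, baseline: str) -> None:
    """Save mode: the freshly written 'new' directory becomes the named baseline."""
    new_dir = output_dir / bench_id / "new"
    base_dir = output_dir / bench_id / baseline
    if base_dir.exists():
        shutil.rmtree(base_dir)
    shutil.copytree(new_dir, base_dir)
```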
|
/external/rust/crates/grpcio-sys/grpc/tools/profiling/microbenchmarks/bm_diff/ |
D | README.md |
    6 a baseline commit, then quickly compare data from your working branch to that
    7 baseline data to see if you have made any performance wins.
    57 `tools/profiling/microbenchmarks/bm_diff/bm_build.py -b bm_error -n baseline`
    71 `tools/profiling/microbenchmarks/bm_diff/bm_run.py -b bm_error -b baseline -l 5`
    73 Then an example output file would be `bm_error.opt.baseline.0.json`
    82 For example, assuming you had already built and run a 'baseline' microbenchmark
    86 `tools/profiling/microbenchmarks/bm_diff/bm_diff.py -b bm_error -o baseline -n current -l 5`
    110 unnecessary to build and run the baseline commit every time. That is why we
    111 provide a different flag in case you are sure that the baseline benchmark has
    113 of the baseline. This will only build and run the current branch. For example:
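The diff step of this workflow amounts to loading the result files written for the two named runs (e.g. `bm_error.opt.baseline.0.json` and the `current` equivalents) and reporting the per-benchmark change. A simplified sketch of that comparison; the file layout and field names are guesses based on the naming above, not the exact bm_diff format:

```python
import glob
import json

def load_run(bench: str, name: str) -> dict:
    """Merge every loop of one named run, keyed by individual benchmark name."""
    merged = {}
    for path in sorted(glob.glob(f"{bench}.opt.{name}.*.json")):
        with open(path) as f:
            for entry in json.load(f):
                merged.setdefault(entry["name"], []).append(entry["cpu_time"])
    return merged

def diff_runs(bench: str, old: str = "baseline", new: str = "current") -> None:
    old_run, new_run = load_run(bench, old), load_run(bench, new)
    for name in sorted(old_run.keys() & new_run.keys()):
        o = sum(old_run[name]) / len(old_run[name])
        n = sum(new_run[name]) / len(new_run[name])
        print(f"{name}: {o:.1f} -> {n:.1f} ({(n - o) / o * 100:+.1f}%)")

# diff_runs("bm_error")  # compares bm_error.opt.baseline.*.json vs bm_error.opt.current.*.json
```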
|
/external/grpc-grpc/tools/profiling/microbenchmarks/bm_diff/ |
D | README.md |
    6 a baseline commit, then quickly compare data from your working branch to that
    7 baseline data to see if you have made any performance wins.
    57 `tools/profiling/microbenchmarks/bm_diff/bm_build.py -b bm_error -n baseline`
    71 `tools/profiling/microbenchmarks/bm_diff/bm_run.py -b bm_error -b baseline -l 5`
    73 Then an example output file would be `bm_error.opt.baseline.0.json`
    82 For example, assuming you had already built and run a 'baseline' microbenchmark
    86 `tools/profiling/microbenchmarks/bm_diff/bm_diff.py -b bm_error -o baseline -n current -l 5`
    110 unnecessary to build and run the baseline commit every time. That is why we
    111 provide a different flag in case you are sure that the baseline benchmark has
    113 of the baseline. This will only build and run the current branch. For example:
|