
Searched refs:benchmark (Results 1 – 25 of 533) sorted by relevance


/external/toolchain-utils/
update_telemetry_defaults.py
46 def ListCurrentDefaults(self, benchmark=all): argument
52 if benchmark == all:
59 elif benchmark in self._defaults:
60 results = self._defaults[benchmark]
61 out_str = benchmark + ' : '
66 print("Error: Unrecognized benchmark '%s'" % benchmark)
68 def AddDefault(self, benchmark, result): argument
69 if benchmark in self._defaults:
70 resultList = self._defaults[benchmark]
74 self._defaults[benchmark] = resultList
[all …]
/external/libcxx/benchmarks/
ContainerBenchmarks.hpp
12 void BM_ConstructIterIter(benchmark::State& st, Container, GenInputs gen) { in BM_ConstructIterIter()
16 benchmark::DoNotOptimize(&in); in BM_ConstructIterIter()
19 benchmark::DoNotOptimize(c.data()); in BM_ConstructIterIter()
24 void BM_InsertValue(benchmark::State& st, Container c, GenInputs gen) { in BM_InsertValue()
30 benchmark::DoNotOptimize(&(*c.insert(*it).first)); in BM_InsertValue()
32 benchmark::ClobberMemory(); in BM_InsertValue()
37 void BM_InsertValueRehash(benchmark::State& st, Container c, GenInputs gen) { in BM_InsertValueRehash()
44 benchmark::DoNotOptimize(&(*c.insert(*it).first)); in BM_InsertValueRehash()
46 benchmark::ClobberMemory(); in BM_InsertValueRehash()
52 void BM_InsertDuplicate(benchmark::State& st, Container c, GenInputs gen) { in BM_InsertDuplicate()
[all …]
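The ContainerBenchmarks.hpp hits above drive container operations through benchmark::State, pinning results with DoNotOptimize and flushing writes with ClobberMemory. A minimal sketch of the same pattern against the public Google Benchmark API; the input generator and container choice below are placeholders of mine, not the GenInputs helpers from that header:

    #include <benchmark/benchmark.h>

    #include <cstddef>
    #include <set>
    #include <vector>

    // Hypothetical stand-in for the GenInputs generators used by ContainerBenchmarks.hpp.
    static std::vector<int> MakeInputs(std::size_t n) {
      std::vector<int> v(n);
      for (std::size_t i = 0; i < n; ++i) v[i] = static_cast<int>((i * 7919) % 104729);
      return v;
    }

    static void BM_SetInsertValue(benchmark::State& state) {
      const std::vector<int> in = MakeInputs(static_cast<std::size_t>(state.range(0)));
      for (auto _ : state) {
        std::set<int> c;
        for (int value : in) {
          // Keep the returned iterator observable so the insert cannot be elided,
          // then force pending writes to be treated as visible side effects.
          benchmark::DoNotOptimize(&(*c.insert(value).first));
          benchmark::ClobberMemory();
        }
      }
    }
    BENCHMARK(BM_SetInsertValue)->Range(8, 8 << 10);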
filesystem.bench.cpp
13 void BM_PathConstructString(benchmark::State &st, GenInputs gen) { in BM_PathConstructString()
19 benchmark::DoNotOptimize(PP.native().data()); in BM_PathConstructString()
22 benchmark::DoNotOptimize(P.native().data()); in BM_PathConstructString()
30 void BM_PathConstructCStr(benchmark::State &st, GenInputs gen) { in BM_PathConstructCStr()
36 benchmark::DoNotOptimize(PP.native().data()); in BM_PathConstructCStr()
39 benchmark::DoNotOptimize(P.native().data()); in BM_PathConstructCStr()
47 void BM_PathConstructIter(benchmark::State &st, GenInputs gen) { in BM_PathConstructIter()
56 benchmark::DoNotOptimize(PP.native().data()); in BM_PathConstructIter()
57 benchmark::DoNotOptimize(Start); in BM_PathConstructIter()
58 benchmark::DoNotOptimize(End); in BM_PathConstructIter()
[all …]
util_smartptr.bench.cpp
14 static void BM_SharedPtrCreateDestroy(benchmark::State& st) { in BM_SharedPtrCreateDestroy()
17 benchmark::DoNotOptimize(sp.get()); in BM_SharedPtrCreateDestroy()
22 static void BM_SharedPtrIncDecRef(benchmark::State& st) { in BM_SharedPtrIncDecRef()
24 benchmark::DoNotOptimize(sp.get()); in BM_SharedPtrIncDecRef()
27 benchmark::ClobberMemory(); in BM_SharedPtrIncDecRef()
32 static void BM_WeakPtrIncDecRef(benchmark::State& st) { in BM_WeakPtrIncDecRef()
34 benchmark::DoNotOptimize(sp.get()); in BM_WeakPtrIncDecRef()
37 benchmark::ClobberMemory(); in BM_WeakPtrIncDecRef()
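util_smartptr.bench.cpp measures shared_ptr construction and reference-count traffic. A hedged analogue of the BM_SharedPtrIncDecRef idea, written from the public API rather than copied from libc++:

    #include <benchmark/benchmark.h>

    #include <memory>

    static void BM_SharedPtrCopy(benchmark::State& state) {
      std::shared_ptr<int> sp = std::make_shared<int>(42);
      for (auto _ : state) {
        // Each iteration bumps and then drops the reference count once.
        std::shared_ptr<int> copy = sp;
        benchmark::DoNotOptimize(copy.get());
        benchmark::ClobberMemory();
      }
    }
    BENCHMARK(BM_SharedPtrCopy);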
/external/libcxx/utils/google-benchmark/test/
basic_test.cc
6 void BM_empty(benchmark::State& state) { in BM_empty()
8 benchmark::DoNotOptimize(state.iterations()); in BM_empty()
14 void BM_spin_empty(benchmark::State& state) { in BM_spin_empty()
17 benchmark::DoNotOptimize(x); in BM_spin_empty()
24 void BM_spin_pause_before(benchmark::State& state) { in BM_spin_pause_before()
26 benchmark::DoNotOptimize(i); in BM_spin_pause_before()
30 benchmark::DoNotOptimize(i); in BM_spin_pause_before()
37 void BM_spin_pause_during(benchmark::State& state) { in BM_spin_pause_during()
41 benchmark::DoNotOptimize(i); in BM_spin_pause_during()
45 benchmark::DoNotOptimize(i); in BM_spin_pause_during()
[all …]
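basic_test.cc exercises the core measurement loop and the PauseTiming/ResumeTiming pair (the BM_spin_pause_* cases). A minimal, self-contained file in the same style; the names are mine, only the benchmark:: calls are the library's:

    #include <benchmark/benchmark.h>

    static void BM_Empty(benchmark::State& state) {
      for (auto _ : state) {
        // Mirrors BM_empty above: keep the loop body from being optimized out.
        benchmark::DoNotOptimize(state.iterations());
      }
    }
    BENCHMARK(BM_Empty);

    static void BM_UntimedSetup(benchmark::State& state) {
      for (auto _ : state) {
        state.PauseTiming();  // per-iteration setup excluded from the timing
        int setup = 123;
        benchmark::DoNotOptimize(setup);
        state.ResumeTiming();
        benchmark::DoNotOptimize(setup + 1);  // the timed work
      }
    }
    BENCHMARK(BM_UntimedSetup);

    BENCHMARK_MAIN();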
register_benchmark_test.cc
11 class TestReporter : public benchmark::ConsoleReporter {
29 typedef benchmark::BenchmarkReporter::Run Run;
58 typedef benchmark::internal::Benchmark* ReturnVal;
63 void BM_function(benchmark::State& state) { in BM_function()
68 ReturnVal dummy = benchmark::RegisterBenchmark(
79 void BM_extra_args(benchmark::State& st, const char* label) { in BM_extra_args()
88 benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second); in RegisterFromFunction()
101 void operator()(benchmark::State& st) { in operator ()()
111 benchmark::RegisterBenchmark("custom_fixture", fx); in TestRegistrationAtRuntime()
118 auto capturing_lam = [=](benchmark::State& st) { in TestRegistrationAtRuntime()
[all …]
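register_benchmark_test.cc covers benchmark::RegisterBenchmark, which registers callables at runtime instead of through the BENCHMARK() macro. A hedged sketch of that flow; the case names and lambda body are invented for illustration:

    #include <benchmark/benchmark.h>

    #include <string>
    #include <vector>

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);

      // Any callable taking benchmark::State& can be registered under a runtime-built name.
      const std::vector<std::string> cases = {"small", "large"};
      for (const std::string& name : cases) {
        benchmark::RegisterBenchmark(("runtime/" + name).c_str(),
                                     [](benchmark::State& st) {
                                       for (auto _ : st) {
                                         benchmark::DoNotOptimize(st.iterations());
                                       }
                                     });
      }

      benchmark::RunSpecifiedBenchmarks();
      return 0;
    }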
donotoptimize_test.cc
32 benchmark::DoNotOptimize(buffer8); in main()
35 benchmark::DoNotOptimize(buffer20); in main()
38 benchmark::DoNotOptimize(buffer1024); in main()
39 benchmark::DoNotOptimize(&buffer1024[0]); in main()
42 benchmark::DoNotOptimize(x); in main()
43 benchmark::DoNotOptimize(&x); in main()
44 benchmark::DoNotOptimize(x += 42); in main()
46 benchmark::DoNotOptimize(double_up(x)); in main()
49 benchmark::DoNotOptimize(BitRef::Make()); in main()
51 benchmark::DoNotOptimize(lval); in main()
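donotoptimize_test.cc pushes buffers, scalars, and expression results through DoNotOptimize to confirm they survive optimization. The intent, in a short example of my own:

    #include <benchmark/benchmark.h>

    static int Square(int v) { return v * v; }

    static void BM_Square(benchmark::State& state) {
      int x = 17;
      for (auto _ : state) {
        // Without DoNotOptimize the compiler may fold Square(x) to a constant
        // and delete the loop body entirely, leaving nothing to measure.
        benchmark::DoNotOptimize(Square(x));
      }
    }
    BENCHMARK(BM_Square);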
reporter_output_test.cc
29 auto const& Caches = benchmark::CPUInfo::Get().caches; in AddContextCases()
57 void BM_basic(benchmark::State& state) { in BM_basic()
76 void BM_bytes_per_second(benchmark::State& state) { in BM_bytes_per_second()
98 void BM_items_per_second(benchmark::State& state) { in BM_items_per_second()
120 void BM_label(benchmark::State& state) { in BM_label()
142 void BM_error(benchmark::State& state) { in BM_error()
160 void BM_no_arg_name(benchmark::State& state) { in BM_no_arg_name()
173 void BM_arg_name(benchmark::State& state) { in BM_arg_name()
186 void BM_arg_names(benchmark::State& state) { in BM_arg_names()
200 void BM_Complexity_O1(benchmark::State& state) { in BM_Complexity_O1()
[all …]
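reporter_output_test.cc checks how throughput counters and labels appear in the console and JSON reporters. A hedged sketch of the calls those cases rely on; the memcpy workload is mine:

    #include <benchmark/benchmark.h>

    #include <cstdint>
    #include <cstring>
    #include <vector>

    static void BM_Memcpy(benchmark::State& state) {
      const std::size_t bytes = static_cast<std::size_t>(state.range(0));
      std::vector<char> src(bytes, 'x'), dst(bytes);
      for (auto _ : state) {
        std::memcpy(dst.data(), src.data(), bytes);
        benchmark::DoNotOptimize(dst.data());
      }
      // Surfaces as the bytes_per_second / items_per_second columns plus a free-form label.
      state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * static_cast<int64_t>(bytes));
      state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
      state.SetLabel("memcpy");
    }
    BENCHMARK(BM_Memcpy)->Arg(1 << 10)->Arg(1 << 20);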
benchmark_test.cc
54 static void BM_Factorial(benchmark::State& state) { in BM_Factorial()
65 static void BM_CalculatePiRange(benchmark::State& state) { in BM_CalculatePiRange()
74 static void BM_CalculatePi(benchmark::State& state) { in BM_CalculatePi()
77 benchmark::DoNotOptimize(CalculatePi(depth)); in BM_CalculatePi()
84 static void BM_SetInsert(benchmark::State& state) { in BM_SetInsert()
102 static void BM_Sequential(benchmark::State& state) { in BM_Sequential()
120 static void BM_StringCompare(benchmark::State& state) { in BM_StringCompare()
123 for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); in BM_StringCompare()
127 static void BM_SetupTeardown(benchmark::State& state) { in BM_SetupTeardown()
147 static void BM_LongTest(benchmark::State& state) { in BM_LongTest()
[all …]
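benchmark_test.cc parameterizes cases over argument ranges (BM_SetInsert, BM_CalculatePiRange). A short sketch of multi-argument ranges, with a lookup workload of my own:

    #include <benchmark/benchmark.h>

    #include <set>

    static void BM_SetLookup(benchmark::State& state) {
      // range(0): container size, range(1): lookups per iteration.
      std::set<int> data;
      for (int i = 0; i < state.range(0); ++i) data.insert(i);
      for (auto _ : state) {
        for (int i = 0; i < state.range(1); ++i) {
          benchmark::DoNotOptimize(data.count(i));
        }
      }
    }
    BENCHMARK(BM_SetLookup)->Ranges({{1 << 10, 8 << 10}, {64, 512}});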
/external/google-benchmark/test/
basic_test.cc
6 void BM_empty(benchmark::State& state) { in BM_empty()
8 benchmark::DoNotOptimize(state.iterations()); in BM_empty()
14 void BM_spin_empty(benchmark::State& state) { in BM_spin_empty()
17 benchmark::DoNotOptimize(x); in BM_spin_empty()
24 void BM_spin_pause_before(benchmark::State& state) { in BM_spin_pause_before()
26 benchmark::DoNotOptimize(i); in BM_spin_pause_before()
30 benchmark::DoNotOptimize(i); in BM_spin_pause_before()
37 void BM_spin_pause_during(benchmark::State& state) { in BM_spin_pause_during()
41 benchmark::DoNotOptimize(i); in BM_spin_pause_during()
45 benchmark::DoNotOptimize(i); in BM_spin_pause_during()
[all …]
register_benchmark_test.cc
11 class TestReporter : public benchmark::ConsoleReporter {
29 typedef benchmark::BenchmarkReporter::Run Run;
58 typedef benchmark::internal::Benchmark* ReturnVal;
63 void BM_function(benchmark::State& state) { in BM_function()
68 ReturnVal dummy = benchmark::RegisterBenchmark(
79 void BM_extra_args(benchmark::State& st, const char* label) { in BM_extra_args()
88 benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second); in RegisterFromFunction()
101 void operator()(benchmark::State& st) { in operator ()()
111 benchmark::RegisterBenchmark("custom_fixture", fx); in TestRegistrationAtRuntime()
118 auto capturing_lam = [=](benchmark::State& st) { in TestRegistrationAtRuntime()
[all …]
donotoptimize_test.cc
32 benchmark::DoNotOptimize(buffer8); in main()
35 benchmark::DoNotOptimize(buffer20); in main()
38 benchmark::DoNotOptimize(buffer1024); in main()
39 benchmark::DoNotOptimize(&buffer1024[0]); in main()
42 benchmark::DoNotOptimize(x); in main()
43 benchmark::DoNotOptimize(&x); in main()
44 benchmark::DoNotOptimize(x += 42); in main()
46 benchmark::DoNotOptimize(double_up(x)); in main()
49 benchmark::DoNotOptimize(BitRef::Make()); in main()
51 benchmark::DoNotOptimize(lval); in main()
reporter_output_test.cc
29 auto const& Caches = benchmark::CPUInfo::Get().caches; in AddContextCases()
57 void BM_basic(benchmark::State& state) { in BM_basic()
76 void BM_bytes_per_second(benchmark::State& state) { in BM_bytes_per_second()
98 void BM_items_per_second(benchmark::State& state) { in BM_items_per_second()
120 void BM_label(benchmark::State& state) { in BM_label()
142 void BM_error(benchmark::State& state) { in BM_error()
160 void BM_no_arg_name(benchmark::State& state) { in BM_no_arg_name()
173 void BM_arg_name(benchmark::State& state) { in BM_arg_name()
186 void BM_arg_names(benchmark::State& state) { in BM_arg_names()
200 void BM_Complexity_O1(benchmark::State& state) { in BM_Complexity_O1()
[all …]
benchmark_test.cc
54 static void BM_Factorial(benchmark::State& state) { in BM_Factorial()
65 static void BM_CalculatePiRange(benchmark::State& state) { in BM_CalculatePiRange()
74 static void BM_CalculatePi(benchmark::State& state) { in BM_CalculatePi()
77 benchmark::DoNotOptimize(CalculatePi(depth)); in BM_CalculatePi()
84 static void BM_SetInsert(benchmark::State& state) { in BM_SetInsert()
102 static void BM_Sequential(benchmark::State& state) { in BM_Sequential()
120 static void BM_StringCompare(benchmark::State& state) { in BM_StringCompare()
123 for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); in BM_StringCompare()
127 static void BM_SetupTeardown(benchmark::State& state) { in BM_SetupTeardown()
147 static void BM_LongTest(benchmark::State& state) { in BM_LongTest()
[all …]
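The /external/google-benchmark/test/ entries are the standalone copy of the bundled tests listed above. One feature visible in its reporter_output_test.cc hits that the earlier sketches did not cover is asymptotic-complexity reporting (BM_Complexity_O1). A hedged sketch of that API, with a sort workload invented for illustration:

    #include <benchmark/benchmark.h>

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    static void BM_SortReversed(benchmark::State& state) {
      const int n = static_cast<int>(state.range(0));
      std::vector<int> v(static_cast<std::size_t>(n));
      for (auto _ : state) {
        state.PauseTiming();
        for (int i = 0; i < n; ++i) v[static_cast<std::size_t>(i)] = n - i;  // fresh reverse-sorted input
        state.ResumeTiming();
        std::sort(v.begin(), v.end());
        benchmark::DoNotOptimize(v.data());
      }
      state.SetComplexityN(n);
    }
    // Complexity(benchmark::oNLogN) fits the measured times against N*log(N);
    // Complexity() with no argument picks the best fit automatically.
    BENCHMARK(BM_SortReversed)->RangeMultiplier(4)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);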
/external/eigen/bench/
benchmark_suite
4 $CXX -O3 -I .. -DNDEBUG benchmark.cpp -o benchmark && time ./benchmark >/dev/null
6 $CXX -O3 -I .. benchmark.cpp -o benchmark && time ./benchmark >/dev/null
8 $CXX -O3 -I .. -DEIGEN_DEFAULT_TO_ROW_MAJOR -DNDEBUG benchmark.cpp -o benchmark && time ./benchmark
10 $CXX -O3 -I .. -DEIGEN_DEFAULT_TO_ROW_MAJOR benchmark.cpp -o benchmark && time ./benchmark >/dev/nu…
/external/autotest/tko/
machine_benchmark.cgi
27 benchmark = row[0]
28 testname = re.sub(r'\..*', '', benchmark)
29 all_benchmarks.append(benchmark)
33 for benchmark in all_benchmarks:
35 where_tests = { 'subdir': benchmark, 'status_word' : 'GOOD' }
42 available_params.add("%s - %s" % (benchmark,
86 for benchmark in benchmark_key:
88 where = { 'subdir': benchmark, 'status_word' : 'GOOD' }
94 benchmark_data[benchmark] = data
100 for benchmark in benchmark_key:
[all …]
/external/google-benchmark/
README.md
1 # benchmark chapter
2 …uild Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/goog…
3 …7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
4 …age Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/
8 Discussion group: https://groups.google.com/d/forum/benchmark-discuss
22 $ git clone https://github.com/google/benchmark.git
24 $ git clone https://github.com/google/googletest.git benchmark/googletest
26 $ cmake -G <generator> [options] ../benchmark
34 * Checkout the GTest sources into `benchmark/googletest`.
56 git clone https://github.com/google/benchmark.git
[all …]
/external/libcxx/utils/google-benchmark/
README.md
1 # benchmark chapter
2 …uild Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/goog…
3 …7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
4 …age Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/
9 Discussion group: https://groups.google.com/d/forum/benchmark-discuss
23 $ git clone https://github.com/google/benchmark.git
25 $ git clone https://github.com/google/googletest.git benchmark/googletest
27 $ cmake -G <generator> [options] ../benchmark
35 * Checkout the GTest sources into `benchmark/googletest`.
60 git clone https://github.com/google/benchmark.git
[all …]
/external/toolchain-utils/crosperf/
benchmark_run.py
32 def __init__(self, name, benchmark, label, iteration, cache_conditions, argument
38 self.benchmark = benchmark
52 self.test_args = benchmark.test_args
70 self.benchmark.test_name, self.iteration, self.test_args,
74 self.benchmark.suite, self.benchmark.show_all_results,
75 self.benchmark.run_local)
98 retval, self.benchmark.test_name, self.benchmark.suite)
177 if self.benchmark.perf_args and self.benchmark.suite == 'telemetry':
179 self.benchmark.perf_args = ''
181 if self.benchmark.perf_args and self.benchmark.suite == 'test_that':
[all …]
suite_runner.py
60 def Run(self, machine, label, benchmark, test_args, profiler_args): argument
61 for i in range(0, benchmark.retries + 1):
63 if benchmark.suite == 'telemetry':
65 ret_tup = self.Telemetry_Run(machine, label, benchmark, profiler_args)
66 elif benchmark.suite == 'telemetry_Crosperf':
68 ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
71 ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
75 (benchmark.name, benchmark.retries - i))
78 (benchmark.name, i))
82 'benchmark %s succeded on first try' % benchmark.name)
[all …]
/external/libcxx/utils/google-benchmark/docs/
tools.md
9 $ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
12benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output fil…
14 `[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binar…
39 When a benchmark executable is run, the raw output from the benchmark is printed in real time to st…
85 $ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
87benchmark executable file, or a JSON output file. The type of the input file is automatically dete…
89 `[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binar…
141benchmark from the first run it looks for the benchmark with exactly the same name in the second r…
144 2. Compare two different filters of one benchmark
148 $ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
[all …]
/external/google-benchmark/docs/
tools.md
9 $ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
12benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output fil…
14 `[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binar…
39 When a benchmark executable is run, the raw output from the benchmark is printed in real time to st…
85 $ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
87benchmark executable file, or a JSON output file. The type of the input file is automatically dete…
89 `[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binar…
141benchmark from the first run it looks for the benchmark with exactly the same name in the second r…
144 2. Compare two different filters of one benchmark
148 $ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
[all …]
/external/conscrypt/
settings.gradle
6 include ":conscrypt-benchmark-android"
7 include ":conscrypt-benchmark-base"
8 include ":conscrypt-benchmark-graphs"
9 include ":conscrypt-benchmark-jmh"
21 project(':conscrypt-benchmark-android').projectDir = "$rootDir/benchmark-android" as File
22 project(':conscrypt-benchmark-base').projectDir = "$rootDir/benchmark-base" as File
23 project(':conscrypt-benchmark-graphs').projectDir = "$rootDir/benchmark-graphs" as File
24 project(':conscrypt-benchmark-jmh').projectDir = "$rootDir/benchmark-jmh" as File
/external/autotest/client/site_tests/telemetry_AFDOGenerateClient/
telemetry_AFDOGenerateClient.py
77 for benchmark in PAGE_CYCLER_BENCHMARKS:
78 self._try_page_cycler(cr, benchmark)
80 def _try_page_cycler(self, cr, benchmark): argument
90 lambda: self._navigate_page_cycler(cr, benchmark)):
91 logging.info('Browser died while navigating %s', benchmark)
96 def _navigate_page_cycler(self, cr, benchmark): argument
110 logging.info('Navigating to page cycler %s', benchmark)
112 benchmark_start_page = PC_START_PAGE % benchmark
119 benchmark, end_time - start_time)
123 benchmark, end_time - start_time, str(unk_exc))
/external/autotest/server/site_tests/telemetry_AFDOGenerate/
telemetry_AFDOGenerate.py
106 benchmark = benchmark_info[0]
109 self._run_test_with_retry(benchmark, *args)
115 benchmark)
186 def _run_test(self, benchmark, *args): argument
196 logging.info('Starting run for Telemetry benchmark %s', benchmark)
199 benchmark, None, *args)
202 benchmark, end_time - start_time)
207 benchmark, end_time - start_time, str(e))
215 logging.info('Benchmark %s succeeded', benchmark)
218 ' benchmark: %s' % benchmark)
[all …]
