/external/toolchain-utils/crosperf/ |
D | experiment_runner.py |
    32   def _WriteJSONReportToFile(experiment, results_dir, json_report):  argument
    34   has_llvm = any('llvm' in l.compiler for l in experiment.labels)
    36   board = experiment.labels[0].board
    57   experiment,  argument
    62   self._experiment = experiment
    63   self.l = log or logger.GetLogger(experiment.log_dir)
    68   if experiment.log_level != 'verbose':
    121  def _LockAllMachines(self, experiment):  argument
    133  experiment.locked_machines = self.locked_machines
    135  experiment.lock_mgr = lock_machine.LockManager(
    [all …]
|
D | experiment_status.py |
    19   def __init__(self, experiment):  argument
    20   self.experiment = experiment
    21   self.num_total = len(self.experiment.benchmark_runs)
    24   self.log_level = experiment.log_level
    40   if self.experiment.start_time:
    41   elapsed_time = current_time - self.experiment.start_time
    45   if self.completed != self.experiment.num_complete:
    46   self.completed = self.experiment.num_complete
    71   float(self.num_total - self.experiment.num_complete - 1) *
    72   time_completed_jobs / self.experiment.num_run_complete +
    [all …]
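The pair of hits at file lines 71–72 is the remaining-time estimate that the status output prints while runs are in flight: average seconds per completed run, scaled by the runs still outstanding. A self-contained sketch of that arithmetic (only the variable names come from the listing; the function wrapper and the zero-guard are assumptions):

```python
# Sketch of the ETA arithmetic hinted at by lines 71-72 of experiment_status.py.
def estimate_remaining_seconds(num_total, num_complete, num_run_complete,
                               time_completed_jobs):
    if num_run_complete == 0:
        return None  # nothing has finished yet, so there is no per-run average
    avg_seconds_per_run = time_completed_jobs / num_run_complete
    # The "- 1" mirrors the snippet: the run currently executing is not counted
    # as a fully pending run.
    return float(num_total - num_complete - 1) * avg_seconds_per_run
```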
|
D | results_report.py |
    336  def __init__(self, results, email=False, experiment=None):  argument
    339  self.experiment = experiment
    354  def FromExperiment(experiment, email=False):  argument
    355  results = BenchmarkResults.FromExperiment(experiment)
    356  return TextResultsReport(results, email, experiment)
    366  for benchmark_run in self.experiment.benchmark_runs:
    382  for dut in self.experiment.machine_manager.GetMachines():
    389  experiment = self.experiment
    392  if experiment is not None:
    393  title_contents = "Results report for '%s'" % (experiment.name,)
    [all …]
|
D | results_report_unittest.py |
    106  experiment = ExperimentFactory().GetExperiment(efile,
    109  for label in experiment.labels:
    111  return experiment
    114  def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0):  argument
    118  num_configs = len(experiment.benchmarks) * len(experiment.labels)
    119  num_runs = len(experiment.benchmark_runs) // num_configs
    123  bench = experiment.benchmarks[for_benchmark]
    144  for label in experiment.labels:
    145  experiment.benchmark_runs.extend(
    147  return experiment
    [all …]
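The counting at file lines 118–119 is worth a quick sanity check: every (benchmark, label) pair is one configuration, and the scheduled benchmark_runs divide evenly across configurations. A worked example with made-up values:

```python
# Worked example of the arithmetic in _InjectSuccesses (values are illustrative).
benchmarks, labels, total_benchmark_runs = 3, 2, 30
num_configs = benchmarks * labels               # len(benchmarks) * len(labels) -> 6
num_runs = total_benchmark_runs // num_configs  # runs per (benchmark, label)   -> 5
assert (num_configs, num_runs) == (6, 5)
```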
|
D | crosperf.py |
    65   def Cleanup(experiment):  argument
    67   experiment.Cleanup()
    120  experiment = ExperimentFactory().GetExperiment(experiment_file,
    126  atexit.register(Cleanup, experiment)
    129  runner = MockExperimentRunner(experiment, json_report)
    132  experiment, json_report, using_schedv2=(not options.noschedv2))
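Read together, the crosperf.py hits outline the driver: build an Experiment from the experiment file, register a cleanup hook, then hand the object to either MockExperimentRunner or ExperimentRunner. A minimal sketch under those assumptions; anything not quoted in the hits (the truncated GetExperiment arguments, the runner-selection condition, the final Run() call) is a guess, and the dependencies are passed in so the sketch stays self-contained:

```python
# Rough paraphrase of the crosperf.py flow shown above, for illustration only.
import atexit

def _cleanup(experiment):
    experiment.Cleanup()

def run_crosperf(factory, options, experiment_file, json_report,
                 mock_runner_cls, runner_cls, use_mock=False):
    experiment = factory.GetExperiment(experiment_file)  # listing truncates the other args
    atexit.register(_cleanup, experiment)  # release machines/temp state even on abnormal exit
    if use_mock:  # hypothetical selection flag
        runner = mock_runner_cls(experiment, json_report)
    else:
        runner = runner_cls(experiment, json_report,
                            using_schedv2=(not options.noschedv2))
    return runner.Run()  # assumed entry point on the runner
```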
|
/external/caliper/caliper/src/main/java/com/google/caliper/runner/ |
D | TrialModule.java |
    44   private final Experiment experiment;  field in TrialModule
    46   TrialModule(UUID trialId, int trialNumber, Experiment experiment) {  in TrialModule()  argument
    49   this.experiment = experiment;  in TrialModule()
    69   return experiment;  in provideExperiment()
    74   static BenchmarkSpec provideBenchmarkSpec(Experiment experiment) {  in provideBenchmarkSpec()  argument
    76   .className(experiment.instrumentation().benchmarkMethod().getDeclaringClass().getName())  in provideBenchmarkSpec()
    77   .methodName(experiment.instrumentation().benchmarkMethod().getName())  in provideBenchmarkSpec()
    78   .addAllParameters(experiment.userParameters())  in provideBenchmarkSpec()
    91   static MeasurementCollectingVisitor provideMeasurementCollectingVisitor(Experiment experiment) {  in provideMeasurementCollectingVisitor()  argument
    92   return experiment.instrumentation().getMeasurementCollectingVisitor();  in provideMeasurementCollectingVisitor()
    [all …]
|
D | ScheduledTrial.java |
    27   private final Experiment experiment;  field in ScheduledTrial
    30   @Inject ScheduledTrial(Experiment experiment, TrialRunLoop runLoop,  in ScheduledTrial()  argument
    33   this.experiment = experiment;  in ScheduledTrial()
    41   Experiment experiment() {  in experiment()  method in ScheduledTrial
    42   return experiment;  in experiment()
|
D | TrialResult.java |
    25   private final Experiment experiment;  field in TrialResult
    28   TrialResult(Trial trial, Experiment experiment, ImmutableList<String> trialMessages) {  in TrialResult()  argument
    30   this.experiment = experiment;  in TrialResult()
    35   return experiment;  in getExperiment()
|
D | TrialOutputLogger.java |
    43   private final Experiment experiment;  field in TrialOutputLogger
    48   @TrialId UUID trialId, Experiment experiment) {  in TrialOutputLogger()  argument
    52   this.experiment = experiment;  in TrialOutputLogger()
    81   writer.println("Experiment: " + experiment);  in printHeader()
|
D | ExperimentingCaliperRun.java |
    103  @Override public String apply(Experiment experiment) {  in run()
    104  return experiment.instrumentation().benchmarkMethod().getName();  in run()
    243  for (Experiment experiment : experimentsToRun) {  in createScheduledTrials()
    246  new TrialModule(UUID.randomUUID(), trialNumber, experiment));  in createScheduledTrials()
    264  for (Experiment experiment : experiments) {  in dryRun()
    267  mainComponent.newExperimentComponent(ExperimentModule.forExperiment(experiment));  in dryRun()
    271  experiment.instrumentation().dryRun(benchmark);  in dryRun()
    272  builder.add(experiment);  in dryRun()
|
D | ExperimentModule.java |
    47   public static ExperimentModule forExperiment(Experiment experiment) {  in forExperiment()  argument
    48   Method benchmarkMethod = experiment.instrumentation().benchmarkMethod();  in forExperiment()
    51   experiment.userParameters());  in forExperiment()
|
D | WorkerProcess.java |
    79   Experiment experiment,  in WorkerProcess()  argument
    86   buildProcess(trialId, experiment, benchmarkSpec, localPort, benchmarkClass);  in WorkerProcess()
    145  Experiment experiment,  in buildProcess()  argument
    150  Instrumentation instrumentation = experiment.instrumentation();  in buildProcess()
    164  VirtualMachine vm = experiment.vm();  in buildProcess()
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | optimize_dataset_op.cc |
    101  string experiment = pair.first;  in MakeDataset()  local
    102  if (std::find(optimizations.begin(), optimizations.end(), experiment) !=  in MakeDataset()
    104  VLOG(1) << "The live experiment \"" << experiment << "\" is applied.";  in MakeDataset()
    105  metrics::RecordTFDataExperiment(experiment);  in MakeDataset()
    118  for (auto& experiment : graduated_experiments) {  in MakeDataset()  local
    119  if (std::find(optimizations.begin(), optimizations.end(), experiment) ==  in MakeDataset()
    121  optimizations.push_back(experiment);  in MakeDataset()
    123  VLOG(1) << "The graduated experiment \"" << experiment << "\" is applied.";  in MakeDataset()
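The two MakeDataset loops encode a simple policy: a live experiment is only logged and recorded when it already appears in the chosen optimizations, while a graduated experiment is appended whenever it is missing and then logged (the hit line numbers suggest the log statement sits outside the membership check). A small Python rendering of that policy; the C++ above is the source, and the function shape and print/record stand-ins here are illustrative:

```python
# Illustrative Python rendering of the experiment-selection logic shown above.
def apply_experiments(optimizations, live_experiments, graduated_experiments):
    applied_live = []
    # Live experiments: only note the ones that already made it into `optimizations`.
    for experiment in live_experiments:
        if experiment in optimizations:
            print('The live experiment "%s" is applied.' % experiment)
            applied_live.append(experiment)  # stands in for RecordTFDataExperiment
    # Graduated experiments: force them into the rewrite list if missing, then log.
    for experiment in graduated_experiments:
        if experiment not in optimizations:
            optimizations.append(experiment)
        print('The graduated experiment "%s" is applied.' % experiment)
    return optimizations, applied_live
```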
|
/external/webrtc/video/adaptation/ |
D | quality_rampup_experiment_helper.cc |
    23   QualityRampupExperiment experiment)  in QualityRampUpExperimentHelper()  argument
    26   quality_rampup_experiment_(std::move(experiment)),  in QualityRampUpExperimentHelper()
    37   QualityRampupExperiment experiment = QualityRampupExperiment::ParseSettings();  in CreateIfEnabled()  local
    38   if (experiment.Enabled()) {  in CreateIfEnabled()
    41   experiment));  in CreateIfEnabled()
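The CreateIfEnabled() hits show the usual guard pattern: parse the field-trial settings, and only construct the helper when the experiment is enabled. A Python paraphrase with the WebRTC types replaced by injected callables (both callables are stand-ins, not real API):

```python
# Paraphrase of the CreateIfEnabled() hits above; for illustration only.
def create_if_enabled(parse_settings, helper_factory):
    experiment = parse_settings()          # QualityRampupExperiment::ParseSettings()
    if experiment.Enabled():               # mirrors experiment.Enabled()
        return helper_factory(experiment)  # the C++ moves the experiment into the helper
    return None
```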
|
D | quality_rampup_experiment_helper.h |
    58   QualityRampupExperiment experiment);
|
/external/toolchain-utils/crosperf/experiment_files/ |
D | README.md |
    3    To use these experiment files, replace the board, remote and images
    9    The final experiment file should look something like the following (but with
    27   # Paste experiment benchmarks here. Example, I pasted
    30   # This experiment just runs a short autotest which measures the performance
|
/external/perfetto/test/trace_processor/profiling/ |
D | stack_profile_symbols.out |
    4    "(anonymous namespace)::OtherFn(unsigned int, unsigned long)","/builds/master/experiment/external/p…
    5    "main","/builds/master/experiment/external/perfetto/out/linux_clang_release/../../src/profiling/mem…
|
/external/libaom/libaom/build/cmake/ |
D | aom_config_defaults.cmake |
    112  # AV1 experiment flags.
    113  set_aom_config_var(CONFIG_SPEED_STATS 0 "AV1 experiment flag.")
    114  set_aom_config_var(CONFIG_COLLECT_RD_STATS 0 "AV1 experiment flag.")
    115  set_aom_config_var(CONFIG_DIST_8X8 0 "AV1 experiment flag.")
    116  set_aom_config_var(CONFIG_ENTROPY_STATS 0 "AV1 experiment flag.")
    117  set_aom_config_var(CONFIG_INTER_STATS_ONLY 0 "AV1 experiment flag.")
    118  set_aom_config_var(CONFIG_RD_DEBUG 0 "AV1 experiment flag.")
    119  set_aom_config_var(CONFIG_SHARP_SETTINGS 0 "AV1 experiment flag.")
|
/external/toolchain-utils/bestflags/ |
D | example_algorithms.py |
    141  for experiment in experiments:
    142  if experiment == 'GA':
    144  _ProcessGA(experiments[experiment])
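The example_algorithms.py hits show a name-keyed dispatch over parsed experiment configurations, with only the 'GA' branch visible. A generalised sketch, where both mappings are assumptions rather than the file's actual structure:

```python
# Generalised sketch of the dispatch in example_algorithms.py; the real code only
# shows the 'GA' branch calling _ProcessGA(experiments[experiment]).
def process_experiments(experiments, handlers):
    """experiments: name -> parsed config; handlers: name -> processing function."""
    for experiment in experiments:
        if experiment in handlers:  # e.g. experiment == 'GA'
            handlers[experiment](experiments[experiment])
```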
|
/external/caliper/caliper/src/test/java/com/google/caliper/runner/ |
D | WorkerProcessTest.java |
    83   Experiment experiment = new Experiment(  in simpleArgsTest()  local
    91   ProcessBuilder builder = createProcess(experiment, spec);  in simpleArgsTest()
    163  private ProcessBuilder createProcess(Experiment experiment, BenchmarkSpec benchmarkSpec) {  in createProcess()  argument
    164  return WorkerProcess.buildProcess(TRIAL_ID, experiment, benchmarkSpec, PORT_NUMBER,  in createProcess()
|
/external/toolchain-utils/bestflags/examples/omnetpp/ |
D | README.md |
    6    information to run the experiment.
    13   the experiment will try.
|
/external/llvm-project/llvm/test/Instrumentation/AddressSanitizer/ |
D | experiment.ll |
    2    ; -asan-force-experiment flag turns all memory accesses into experiments.
    3    ; RUN: opt < %s -asan -asan-module -enable-new-pm=0 -asan-force-experiment=42 -S | FileCheck %s
    4    ; RUN: opt < %s -passes='asan-pipeline' -asan-force-experiment=42 -S | FileCheck %s
|
D | experiment-call.ll |
    2    ; -asan-force-experiment flag turns all memory accesses into experiments.
    3    ; RUN: opt < %s -asan -asan-module -enable-new-pm=0 -asan-force-experiment=42 -asan-instrumentation…
    4    ; RUN: opt < %s -passes='asan-pipeline' -asan-force-experiment=42 -asan-instrumentation-with-call-t…
|
/external/llvm/test/Instrumentation/AddressSanitizer/ |
D | experiment-call.ll |
    2    ; -asan-force-experiment flag turns all memory accesses into experiments.
    3    ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -asan-instrumentation-with-call-thresh…
|
D | experiment.ll |
    2    ; -asan-force-experiment flag turns all memory accesses into experiments.
    3    ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -S | FileCheck %s
|