Searched refs: score_name (Results 1 – 5 of 5), sorted by relevance
/external/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
  121: for tab_index, score_name in enumerate(score_names):
  126:     self._FormatName(score_name)))
  133: for tab_index, score_name in enumerate(score_names):
  138:     html.append(self._BuildScoreTab(score_name, ('s{}'.format(tab_index),)))
  155: def _BuildScoreTab(self, score_name, anchor_data):
  159:     self._scores_data_frame.eval_score_name == score_name]
  185:     score_name, apm_config[0], test_data_gen_info[0],
  190:     score_name, test_data_gen_info[0], test_data_gen_info[1],
  198:     score_name, apm_configs, test_data_gen_configs,
  203: def _BuildScoreTableCell(self, score_name, test_data_gen,
  [all …]
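The export.py hits all follow one pattern: iterate over the evaluation score names and, for each name, slice the scores data frame on its eval_score_name column to build a per-score HTML tab. Below is a minimal sketch of that slicing loop, assuming a pandas DataFrame with eval_score_name and score columns; the sample data and the print statement stand in for the real HTML-building helpers and are not taken from the source.

    import pandas as pd

    # Made-up scores table; in the tool it is assembled from the collected
    # score files.
    scores_data_frame = pd.DataFrame({
        'eval_score_name': ['polqa', 'polqa', 'audio_level', 'audio_level'],
        'score': [3.1, 3.4, -6.0, -12.0],
    })

    score_names = sorted(scores_data_frame['eval_score_name'].unique())

    for tab_index, score_name in enumerate(score_names):
        # Same filter as in _BuildScoreTab: keep only the rows for this score.
        tab_scores = scores_data_frame[
            scores_data_frame.eval_score_name == score_name]
        print('tab s{}: {} ({} rows)'.format(
            tab_index, score_name, len(tab_scores)))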
/external/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/collect_data.py
  110:     test_data_gen_name, score_name, args):
  134:     (score_name, args.eval_scores),
  167:     score_name) = _GetScoreDescriptors(score_filepath)
  176:     score_name,
  185:     score_name)
  208:     score_name,
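collect_data.py recovers score_name (together with other descriptors) from each score file's path via _GetScoreDescriptors and, per the hit at line 134, filters against the evaluation scores requested on the command line. A rough sketch of that flow follows; the score-<name>.txt naming scheme and the _GetScoreName helper are assumptions made for illustration, not something these hits confirm.

    import os
    import re

    # Assumed naming scheme for the per-score output files.
    _SCORE_FILENAME_RE = re.compile(r'^score-(?P<score_name>.+)\.txt$')

    def _GetScoreName(score_filepath):
        """Extracts the evaluation score name from a score file path."""
        match = _SCORE_FILENAME_RE.match(os.path.basename(score_filepath))
        if match is None:
            raise ValueError('unexpected score file: ' + score_filepath)
        return match.group('score_name')

    requested_eval_scores = ['polqa']  # Stand-in for args.eval_scores.

    for filepath in ['out/score-polqa.txt', 'out/score-audio_level.txt']:
        score_name = _GetScoreName(filepath)
        # Mirrors the (score_name, args.eval_scores) filter: skip scores
        # that were not requested.
        if requested_eval_scores and score_name not in requested_eval_scores:
            continue
        print('collecting', score_name, 'from', filepath)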
/external/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
  81: for score_name in score_names:
  82:     scores = data_frame[data_frame.eval_score_name == score_name].score
  83:     normalization_constants[score_name] = max(scores)
  94: for score_name in score_names:
  96:     score_name].score
  97:     scores[score_name] = sum(data_cell_scores) / len(data_cell_scores)
  98:     scores[score_name] /= normalization_constants[score_name]
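These hits show a two-pass scheme: first record, per score name, the maximum observed score as a normalization constant; then average the scores of a data cell and divide by that constant. The sketch below reproduces the same arithmetic with the eval_score_name and score columns visible in the hits; the sample data is made up and, for brevity, the whole frame stands in for a single data cell.

    import pandas as pd

    data_frame = pd.DataFrame({
        'eval_score_name': ['polqa', 'polqa', 'thd', 'thd'],
        'score': [2.0, 4.0, 0.1, 0.3],
    })
    score_names = data_frame['eval_score_name'].unique()

    # Pass 1: per-score normalization constant (the maximum observed score).
    normalization_constants = {}
    for score_name in score_names:
        scores = data_frame[data_frame.eval_score_name == score_name].score
        normalization_constants[score_name] = max(scores)

    # Pass 2: average the scores of the cell and normalize by the constant.
    scores = {}
    for score_name in score_names:
        data_cell_scores = data_frame[
            data_frame.eval_score_name == score_name].score
        scores[score_name] = sum(data_cell_scores) / len(data_cell_scores)
        scores[score_name] /= normalization_constants[score_name]

    print(scores)  # {'polqa': 0.75, 'thd': 0.666...}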
/external/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
  54: def FilterScoresByParams(data_frame, filter_params, score_name, config_dir):
  82:     score_name]
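FilterScoresByParams narrows the scores data frame using APM configuration parameters read from config_dir before the per-score plots are drawn; the parameter matching itself is not visible in these hits. The sketch below only shows the per-score slice (presumably what the bracket expression ending at line 82 does) plus a grouped summary; the _SliceByScoreName helper and the apm_config column are illustrative names, not taken from the source.

    import pandas as pd

    def _SliceByScoreName(data_frame, score_name):
        # Keep only the rows for the requested evaluation score.
        return data_frame[data_frame.eval_score_name == score_name]

    data_frame = pd.DataFrame({
        'eval_score_name': ['polqa'] * 4 + ['thd'] * 4,
        'apm_config': ['default', 'default', 'aggressive', 'aggressive'] * 2,
        'score': [3.0, 3.2, 2.5, 2.7, 0.10, 0.20, 0.40, 0.30],
    })

    polqa = _SliceByScoreName(data_frame, 'polqa')
    # The real script feeds groups like these to boxplots; here we just
    # print per-config summary statistics.
    print(polqa.groupby('apm_config')['score'].describe())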
/external/webrtc/modules/audio_processing/test/py_quality_assessment/README.md
  104: -v <score_name>