# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import argparse
import fnmatch as fnm
import json
import math
import numpy as np
import os
import re
import sys
import logging

from collections import defaultdict
from colors import TestColors
from results import Results


# By default compare all the possible combinations
DEFAULT_COMPARE = [(r'base_', r'test_')]

class Report(object):

    def __init__(self, results_dir, compare=None, formats=['relative']):
        self.results_json = results_dir + '/results.json'
        self.results = {}

        self.compare = []

        # Setup logging
        self._log = logging.getLogger('Report')

        # Parse results (if required)
        if not os.path.isfile(self.results_json):
            Results(results_dir)

        # Load results from file (if already parsed)
        self._log.info('Load results from [%s]...',
                       self.results_json)
        with open(self.results_json) as infile:
            self.results = json.load(infile)

        # Setup configuration comparisons
        if compare is None:
            compare = DEFAULT_COMPARE
            self._log.warning('Comparing all the possible combinations')
        for (base_rexp, test_rexp) in compare:
            self._log.info('Configured regexps for comparisons '
                           '(bases, tests): (%s, %s)',
                           base_rexp, test_rexp)
            base_rexp = re.compile(base_rexp, re.DOTALL)
            test_rexp = re.compile(test_rexp, re.DOTALL)
            self.compare.append((base_rexp, test_rexp))

        # Report all supported workload classes
        self.__rtapp_report(formats)
        self.__default_report(formats)

    ############################### REPORT RTAPP ###############################

    def __rtapp_report(self, formats):

        if 'rtapp' not in self.results.keys():
            self._log.debug('No RTApp workloads to report')
            return

        self._log.debug('Reporting RTApp workloads')

        # Setup labels depending on requested report
        if 'absolute' in formats:
            nrg_label = 'Energy Indexes (Absolute)'
            prf_label = 'Performance Indexes (Absolute)'
            self._log.info('')
            self._log.info('Absolute comparisons:')
            print ''
        else:
            nrg_label = 'Energy Indexes (Relative)'
            prf_label = 'Performance Indexes (Relative)'
            self._log.info('')
            self._log.info('Relative comparisons:')
            print ''

        # Dump headers
        print '{:13s} {:20s} |'\
              ' {:33s} | {:54s} |'\
              .format('Test Id', 'Comparison',
                      nrg_label, prf_label)
        print '{:13s} {:20s} |'\
              ' {:>10s} {:>10s} {:>10s} |'\
              ' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
              .format('', '',
                      'LITTLE', 'big', 'Total',
                      'PerfIndex', 'NegSlacks', 'EDP1', 'EDP2', 'EDP3')

        # For each test
        _results = self.results['rtapp']
        for tid in sorted(_results.keys()):
            new_test = True
            # For each configuration...
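            # The nested loops below pair each configuration matching a
            # 'base' regexp with every other configuration matching the
            # corresponding 'test' regexp; each (base, test) pair produces
            # one comparison row in the table printed above.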
            for base_idx in sorted(_results[tid].keys()):
                # Which matches at least one base regexp
                for (base_rexp, test_rexp) in self.compare:
                    if not base_rexp.match(base_idx):
                        continue
                    # Look for a configuration which matches the test regexp
                    for test_idx in sorted(_results[tid].keys()):
                        if test_idx == base_idx:
                            continue
                        if new_test:
                            print '{:-<37s}+{:-<35s}+{:-<56s}+'\
                                  .format('', '', '')
                            self.__rtapp_reference(tid, base_idx)
                            new_test = False
                        if test_rexp.match(test_idx) is None:
                            continue
                        self.__rtapp_compare(tid, base_idx, test_idx, formats)

        print ''

    def __rtapp_reference(self, tid, base_idx):
        _results = self.results['rtapp']

        self._log.debug('Test %s: compare against [%s] base',
                        tid, base_idx)
        res_line = '{0:12s}: {1:22s} | '.format(tid, base_idx)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            # Dump absolute values
            res_line += ' {0:10.3f}'.format(res_base)
        res_line += ' |'

        # If available, dump also performance results
        if 'performance' not in _results[tid][base_idx].keys():
            print res_line
            return

        for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']

            self._log.debug('idx: %s, base: %s', pidx, res_base)

            if pidx in ['perf_avg']:
                res_line += ' {0:s}'.format(TestColors.rate(res_base))
                continue
            if pidx in ['slack_pct']:
                res_line += ' {0:s}'.format(
                    TestColors.rate(res_base, positive_is_good=False))
                continue
            if 'edp' in pidx:
                res_line += ' {0:10.2e}'.format(res_base)
                continue

        res_line += ' |'
        print res_line

    def __rtapp_compare(self, tid, base_idx, test_idx, formats):
        _results = self.results['rtapp']

        self._log.debug('Test %s: compare %s with %s',
                        tid, base_idx, test_idx)
        res_line = '{0:12s}: {1:20s} | '.format(tid, test_idx)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            res_test = _results[tid][test_idx]['energy'][cpus]['avg']
            speedup_cnt = res_test - res_base
            if 'absolute' in formats:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                speedup_pct = 0
                if res_base != 0:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'\
                    .format(TestColors.rate(
                        speedup_pct,
                        positive_is_good=False))
        res_line += ' |'

        # If available, dump also performance results
        if 'performance' not in _results[tid][base_idx].keys():
            print res_line
            return

        for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']
            res_test = _results[tid][test_idx]['performance'][pidx]['avg']

            self._log.debug('idx: %s, base: %s, test: %s',
                            pidx, res_base, res_test)

            if pidx in ['perf_avg']:
                res_line += ' {0:s}'.format(TestColors.rate(res_test))
                continue

            if pidx in ['slack_pct']:
                res_line += ' {0:s}'.format(
                    TestColors.rate(res_test, positive_is_good=False))
                continue

            # Compute difference base-vs-test
            if 'edp' in pidx:
                speedup_cnt = res_base - res_test
                if 'absolute' in formats:
                    res_line += ' {0:10.2e}'.format(speedup_cnt)
                else:
                    speedup_pct = 0
                    if res_base != 0:
                        speedup_pct = 100.0 * speedup_cnt / res_base
                    res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))

        res_line += ' |'
        print res_line

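    # Note on sign conventions used above: energy deltas are computed as
    # (test - base) and rated with positive_is_good=False, so lower test
    # energy shows up as an improvement; EDP deltas are computed as
    # (base - test), so a positive value means the test configuration
    # improved.
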
    ############################### REPORT DEFAULT #############################

    def __default_report(self, formats):

        # Build the list of workload types which can be rendered using the
        # default parser
        wtypes = []
        for supported_wtype in DEFAULT_WTYPES:
            if supported_wtype in self.results.keys():
                wtypes.append(supported_wtype)

        if len(wtypes) == 0:
            self._log.debug('No Default workloads to report')
            return

        self._log.debug('Reporting Default workloads')

        # Setup labels depending on requested report
        if 'absolute' in formats:
            nrg_label = 'Energy Indexes (Absolute)'
            prf_label = 'Performance Indexes (Absolute)'
            self._log.info('')
            self._log.info('Absolute comparisons:')
            print ''
        else:
            nrg_label = 'Energy Indexes (Relative)'
            prf_label = 'Performance Indexes (Relative)'
            self._log.info('')
            self._log.info('Relative comparisons:')
            print ''

        # Dump headers
        print '{:9s} {:20s} |'\
              ' {:33s} | {:54s} |'\
              .format('Test Id', 'Comparison',
                      nrg_label, prf_label)
        print '{:9s} {:20s} |'\
              ' {:>10s} {:>10s} {:>10s} |'\
              ' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
              .format('', '',
                      'LITTLE', 'big', 'Total',
                      'Perf', 'CTime', 'EDP1', 'EDP2', 'EDP3')

        # For each default test
        for wtype in wtypes:
            _results = self.results[wtype]
            for tid in sorted(_results.keys()):
                new_test = True
                # For each configuration...
                for base_idx in sorted(_results[tid].keys()):
                    # Which matches at least one base regexp
                    for (base_rexp, test_rexp) in self.compare:
                        if not base_rexp.match(base_idx):
                            continue
                        # Look for a configuration which matches the test regexp
                        for test_idx in sorted(_results[tid].keys()):
                            if test_idx == base_idx:
                                continue
                            if new_test:
                                print '{:-<37s}+{:-<35s}+{:-<56s}+'\
                                      .format('', '', '')
                                new_test = False
                            if not test_rexp.match(test_idx):
                                continue
                            self.__default_compare(wtype, tid, base_idx,
                                                   test_idx, formats)

        print ''

    def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
        _results = self.results[wtype]

        self._log.debug('Test %s: compare %s with %s',
                        tid, base_idx, test_idx)
        res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx)
        res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:

            # If either base or test has a 0 MAX energy, this means that
            # energy has not been collected
            base_max = _results[tid][base_idx]['energy'][cpus]['max']
            test_max = _results[tid][test_idx]['energy'][cpus]['max']
            if base_max == 0 or test_max == 0:
                res_line += ' {0:10s}'.format('NA')
                continue

            # Otherwise, report energy values
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            res_test = _results[tid][test_idx]['energy'][cpus]['avg']

            speedup_cnt = res_test - res_base
            if 'absolute' in formats:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                speedup_pct = 0
                if res_base != 0:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'\
                    .format(TestColors.rate(
                        speedup_pct,
                        positive_is_good=False))
        res_line += ' |'

        # If available, dump also performance results
        if 'performance' not in _results[tid][base_idx].keys():
            print res_line
            return

        for pidx in ['perf_avg', 'ctime_avg', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']
            res_test = _results[tid][test_idx]['performance'][pidx]['avg']
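            # For 'perf_avg' higher is better, so the delta below is
            # (test - base); for completion time and EDP lower is better,
            # so the delta is (base - test).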

            self._log.debug('idx: %s, base: %s, test: %s',
                            pidx, res_base, res_test)

            # Compute difference base-vs-test
            speedup_cnt = 0
            if res_base != 0:
                if pidx in ['perf_avg']:
                    speedup_cnt = res_test - res_base
                else:
                    speedup_cnt = res_base - res_test

            # Compute speedup if required
            speedup_pct = 0
            if 'absolute' in formats:
                if 'edp' in pidx:
                    res_line += ' {0:10.2e}'.format(speedup_cnt)
                else:
                    res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                if res_base != 0:
                    if pidx in ['perf_avg']:
                        # speedup_pct = 100.0 * speedup_cnt / res_base
                        speedup_pct = speedup_cnt
                    else:
                        speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))

        res_line += ' |'
        print res_line

# List of workload types which can be parsed using the default test parser
DEFAULT_WTYPES = ['perf_bench_messaging', 'perf_bench_pipe']

# vim: set tabstop=4 shiftwidth=4 expandtab
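
# Minimal usage sketch (assumption: 'results/my_experiment' is a
# hypothetical directory previously populated by a test run and parsed
# by Results):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    Report('results/my_experiment',
           compare=[(r'base_', r'test_')],
           formats=['relative'])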