• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/python
2
3import optparse
4import sys
5import sqlite3
6import scipy.stats
7import numpy
8
9import adbutil
10from devices import DEVICES
11
12DB_PATH="/data/data/com.android.benchmark/databases/BenchmarkResults"
13OUT_PATH = "db/"
14
15QUERY_BAD_FRAME = ("select run_id, name, total_duration from ui_results "
16                   "where total_duration >=12 order by run_id, name")
17QUERY_PERCENT_JANK = ("select run_id, name, sum(jank_frame) as jank_count, count (*) as total "
18                      "from ui_results group by run_id, name")
19
class IterationResult:
    """Per-(run, test) accumulator: bad-frame durations plus jank counts."""

    def __init__(self):
        self.durations = []      # duration (ms) of each bad frame recorded
        self.jank_count = 0      # number of frames flagged as janky
        self.total_count = 0     # total frames recorded for the iteration
26
def get_scoremap(dbpath):
    """Load per-run, per-test benchmark results from a pulled database.

    Returns a dict mapping run_id -> {test name -> IterationResult}, where
    each IterationResult carries the durations of every bad (>= 12ms) frame
    plus the janky/total frame counts for that (run, test) pair.

    Raises sqlite3.DatabaseError when dbpath is not a valid database; the
    connection is closed even on that path.
    """
    db = sqlite3.connect(dbpath)
    try:
        scoremap = {}

        # First pass: collect the duration of every bad frame, creating the
        # nested run_id -> name -> IterationResult entries on demand.
        for run_id, name, total_duration in db.execute(QUERY_BAD_FRAME):
            result = scoremap.setdefault(run_id, {}).setdefault(name, IterationResult())
            result.durations.append(float(total_duration))

        # Second pass: attach jank/total frame counts, but only to (run, test)
        # pairs that actually had bad frames above.
        for run_id, name, jank_count, total_count in db.execute(QUERY_PERCENT_JANK):
            if run_id in scoremap and name in scoremap[run_id]:
                # int() auto-promotes on Python 2, so this replaces the old
                # long() calls without changing behavior and stays portable.
                scoremap[run_id][name].jank_count = int(jank_count)
                scoremap[run_id][name].total_count = int(total_count)
    finally:
        # Previously the connection leaked whenever a query raised (the
        # caller deliberately catches sqlite3.DatabaseError and retries).
        db.close()

    return scoremap
59
def score_device(name, serial, pull = False, verbose = False):
    """Compute and print jank scores for one device's benchmark database.

    name:    device nickname; used to derive the local db file name.
    serial:  adb serial number, used when the database must be pulled.
    pull:    when True, always fetch a fresh database from the device first.
    verbose: when True, print per-test component scores and extra detail.
    """
    dbpath = OUT_PATH + name + ".db"

    if pull:
        # The benchmark database lives in the app's private data dir, so
        # adb needs root before it can be pulled.
        adbutil.root(serial)
        adbutil.pull(serial, DB_PATH, dbpath)

    scoremap = None
    try:
        scoremap = get_scoremap(dbpath)
    except sqlite3.DatabaseError:
        # Local copy is unreadable (corrupt or partial) -- re-pull from the
        # device and retry once; a second failure propagates to the caller.
        print "Database corrupt, fetching..."
        adbutil.root(serial)
        adbutil.pull(serial, DB_PATH, dbpath)
        scoremap = get_scoremap(dbpath)

    per_test_score = {}          # test name -> list of per-run geo scores
    per_test_sample_count = {}   # test name -> list of per-run sample counts
    global_overall = {}          # run_id -> geometric mean across its tests

    for run_id in iter(scoremap):
        overall = []
        if len(scoremap[run_id]) < 1:
            # Run produced no test entries at all; nothing to score.
            if verbose:
                print "Skipping short run %s" % run_id
            continue
        print "Run: %s" % run_id
        for test in iter(scoremap[run_id]):
            if verbose:
                print "\t%s" % test
            scores = []
            sample_count = 0
            res = scoremap[run_id][test]
            # Score components: spread and level of bad-frame durations,
            # weighted by the percentage of janky frames.
            stddev = numpy.std(res.durations)
            mean = numpy.mean(res.durations)
            sample_count = len(res.durations)
            pj = 100 * res.jank_count / float(res.total_count)
            score = stddev * mean *pj
            if score == 0:
                # Clamp perfect (zero) scores to 1 so the geometric means
                # below can never collapse to zero.
                score = 1
            scores.append(score)
            if verbose:
                print "\tScore = %f x %f x %f = %f (%d samples)" % (stddev, mean, pj, score, len(res.durations))

            # NOTE(review): scores holds exactly one entry per test here, so
            # this gmean is the score itself; the list looks like a leftover
            # from an earlier multi-iteration design -- confirm before relying
            # on it.
            geo_run = scipy.stats.gmean(scores)
            if test not in per_test_score:
                per_test_score[test] = []

            if test not in per_test_sample_count:
                per_test_sample_count[test] = []

            per_test_score[test].append(geo_run)
            per_test_sample_count[test].append(int(sample_count))
            overall.append(geo_run)

            if not verbose:
                print "\t%s:\t%0.2f (%0.2f avg. sample count)" % (test, geo_run, sample_count)
            else:
                print "\tOverall:\t%0.2f (%0.2f avg. sample count)" % (geo_run, sample_count)
                print ""

        # One summary number per run: geometric mean of its per-test scores.
        global_overall[run_id] = scipy.stats.gmean(overall)
        print "Run Overall: %f" % global_overall[run_id]
        print ""

    print ""
    print "Variability (CV) - %s:" % name

    # Coefficient of variation across runs shows how repeatable each test's
    # score is on this device.
    for test in per_test_score:
        print "\t%s:\t%0.2f%% (%0.2f avg sample count)" % (test, 100 * scipy.stats.variation(per_test_score[test]), numpy.mean(per_test_sample_count[test]))

    print "\tOverall: %0.2f%%" % (100 * scipy.stats.variation([x for x in global_overall.values()]))
    print ""
133
def parse_options(argv):
    """Parse command-line flags and return the optparse options object.

    Recognized flags: -p (pull fresh db), -d DEVICE (score one device),
    -v (verbose output). Positional arguments are accepted but ignored.
    """
    parser = optparse.OptionParser(
        usage='Usage: %prog [options]',
        description='Example: %prog')
    parser.add_option("-p", dest='pull', action="store_true")
    parser.add_option("-d", dest='device', action="store")
    parser.add_option("-v", dest='verbose', action="store_true")
    opts, _ = parser.parse_args(argv[1:])
    return opts
143
144def main():
145    options = parse_options(sys.argv)
146    if options.device != None:
147        score_device(options.device, DEVICES[options.device], options.pull, options.verbose)
148    else:
149        for name, serial in DEVICES.iteritems():
150            print "======== %s =========" % name
151            score_device(name, serial, options.pull, options.verbose)
152
153if __name__ == "__main__":
154    main()
155