# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This module implements a simple WSGI server for the memory_inspector Web UI.

The WSGI server essentially handles two kinds of requests:
 - /ajax/foo/bar: The AJAX endpoints which exchange JSON data with the JS.
     Request routing is achieved using a simple @uri decorator which performs
     regex matching on the request path.
 - /static/content: Anything not matching the /ajax/ prefix is treated as a
     static content request (for serving the index.html and JS/CSS resources).

The following HTTP status codes are returned by the server:
 - 200 - OK: The request was handled correctly.
 - 404 - Not Found: None of the defined handlers matched the request path.
 - 410 - Gone: The path was matched but the handler returned an empty response.
     This typically happens when the target device is disconnected.
"""

import cgi
import collections
import datetime
import dateutil.parser
import glob
import json
import memory_inspector
import mimetypes
import os
import re
import urlparse
import uuid
import wsgiref.simple_server

from memory_inspector import constants
from memory_inspector.classification import mmap_classifier
from memory_inspector.classification import native_heap_classifier
from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.data import file_storage
from memory_inspector.data import serialization
from memory_inspector.frontends import background_tasks


_HTTP_OK = '200 - OK'
_HTTP_GONE = '410 - Gone'
_HTTP_NOT_FOUND = '404 - Not Found'
_PERSISTENT_STORAGE_PATH = os.path.join(
    os.path.expanduser('~'), '.config', 'memory_inspector')
_CONTENT_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), 'www_content'))
_APP_PROCESS_RE = r'^[\w.:]+$'  # Regex for matching app processes.
_STATS_HIST_SIZE = 120  # Keep at most 120 samples of stats per process.
_CACHE_LEN = 10  # Max length of |_cached_objs|.

# |_cached_objs| keeps the state of short-lived objects that the client needs
# in subsequent AJAX calls.
_cached_objs = collections.OrderedDict()
_persistent_storage = file_storage.Storage(_PERSISTENT_STORAGE_PATH)
_proc_stats_history = {}  # /Android/device/PID -> deque([stats@T=0, stats@T=1])


class UriHandler(object):
  """Base decorator used to automatically route /requests/by/path.

  Each handler is called with the following args:
    args: a tuple of the matching regex groups.
    req_vars: a dictionary of request args (querystring for GET, body for POST).
  Each handler must return a tuple with the following elements:
    http_code: a string with the HTTP status code (e.g., '200 - OK').
    headers: a list of HTTP headers (e.g., [('Content-Type', 'foo/bar')]).
    body: the HTTP response body.
  """
  _handlers = []

  def __init__(self, path_regex, verb='GET', output_filter=None):
    self._path_regex = path_regex
    self._verb = verb
    default_output_filter = lambda *x: x  # Just return the args unchanged.
    self._output_filter = output_filter or default_output_filter

  def __call__(self, handler):
    UriHandler._handlers += [(
        self._verb, self._path_regex, self._output_filter, handler)]

  @staticmethod
  def Handle(method, path, req_vars):
    """Finds a matching handler and calls it (or returns a 404 - Not Found)."""
    for (match_method, path_regex, output_filter, fn) in UriHandler._handlers:
      if method != match_method:
        continue
      m = re.match(path_regex, path)
      if not m:
        continue
      (http_code, headers, body) = fn(m.groups(), req_vars)
      return output_filter(http_code, headers, body)
    return (_HTTP_NOT_FOUND, [], 'No AJAX handlers found')


class AjaxHandler(UriHandler):
  """Decorator for routing AJAX requests.

  This decorator essentially groups the JSON serialization and the cache
  headers which are shared by most of the handlers defined below.
  """
  def __init__(self, path_regex, verb='GET'):
    super(AjaxHandler, self).__init__(
        path_regex, verb, AjaxHandler.AjaxOutputFilter)

  @staticmethod
  def AjaxOutputFilter(http_code, headers, body):
    serialized_content = json.dumps(body, cls=serialization.Encoder)
    extra_headers = [('Cache-Control', 'no-cache'),
                     ('Expires', 'Fri, 19 Sep 1986 05:00:00 GMT')]
    return http_code, headers + extra_headers, serialized_content
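

# Illustrative sketch (not a registered endpoint): a handler is declared by
# decorating a function with @AjaxHandler (or @UriHandler for raw responses)
# and must return the (http_code, headers, body) tuple described in
# |UriHandler|. The route and payload below are made up for the example:
#
#   @AjaxHandler(r'/ajax/example/(\w+)')
#   def _Example(args, req_vars):  # args = regex groups, req_vars = GET/POST.
#     return _HTTP_OK, [], {'echo': args[0]}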


@AjaxHandler('/ajax/backends')
def _ListBackends(args, req_vars):  # pylint: disable=W0613
  return _HTTP_OK, [], [backend.name for backend in backends.ListBackends()]


@AjaxHandler('/ajax/devices')
def _ListDevices(args, req_vars):  # pylint: disable=W0613
  resp = []
  for device in backends.ListDevices():
    # The device settings must be loaded at discovery time (i.e. here), not
    # during startup, because the device might have been plugged in later.
    for k, v in _persistent_storage.LoadSettings(device.id).iteritems():
      device.settings[k] = v

    resp += [{'backend': device.backend.name,
              'id': device.id,
              'name': device.name}]
  return _HTTP_OK, [], resp
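

# Several handlers below return their data in the Google Charts DataTable
# format, i.e. a JSON-serializable dict of column descriptors and rows (the
# column label here is just an example):
#
#   {'cols': [{'label': 'Pid', 'type': 'number'}, ...],
#    'rows': [{'c': [{'v': 123, 'f': None}, ...]}, ...]}
#
# where 'v' is the raw cell value and 'f' its optional formatted counterpart.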


@AjaxHandler(r'/ajax/dump/mmap/(\w+)/(\w+)/(\d+)')
def _DumpMmapsForProcess(args, req_vars):  # pylint: disable=W0613
  """Dumps memory maps for a process.

  The response is formatted according to the Google Charts DataTable format.
  """
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  mmap = process.DumpMemoryMaps()
  table = _ConvertMmapToGTable(mmap)

  # Store the dump in the cache. The client might need it later for profiling.
  cache_id = _CacheObject(mmap)
  return _HTTP_OK, [], {'table': table, 'id': cache_id}


@AjaxHandler(r'/ajax/initialize/(\w+)/(\w+)$', 'POST')
def _InitializeDevice(args, req_vars):  # pylint: disable=W0613
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  device.Initialize()
  if req_vars['enableNativeTracing']:
    device.EnableNativeTracing(True)
  return _HTTP_OK, [], {
      'isNativeTracingEnabled': device.IsNativeTracingEnabled()}
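

# The /ajax/profile/create endpoint below expects a JSON POST body carrying
# the 'type' ('mmap' or 'nheap'), 'source' ('archive' or 'cache') and 'ruleset'
# keys, plus 'archive' and 'snapshots' (when source == 'archive') or 'id'
# (when source == 'cache'). A hypothetical payload, with made-up values:
#
#   {'type': 'nheap', 'source': 'archive', 'archive': '<archive-name>',
#    'snapshots': ['<ISO timestamp>', ...], 'ruleset': 'heuristic'}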


@AjaxHandler(r'/ajax/profile/create', 'POST')
def _CreateProfile(args, req_vars):  # pylint: disable=W0613
  """Creates (and caches) a profile from a set of dumps.

  The profiling data can be retrieved afterwards using the
  /profile/{PROFILE_ID} endpoints (below).
  """
  classifier = None  # A classifier module (/classification/*_classifier.py).
  dumps = {}  # dump-time -> obj. to classify (e.g., |memory_map.Map|).
  for arg in 'type', 'source', 'ruleset':
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg

  # Step 1: collect the memory dumps, according to what the client specified in
  # the 'type' and 'source' POST arguments.

  # Case 1a: The client requests to load data from an archive.
  if req_vars['source'] == 'archive':
    archive = _persistent_storage.OpenArchive(req_vars['archive'])
    if not archive:
      return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
    first_timestamp = None
    for timestamp_str in req_vars['snapshots']:
      timestamp = dateutil.parser.parse(timestamp_str)
      first_timestamp = first_timestamp or timestamp
      time_delta = int((timestamp - first_timestamp).total_seconds())
      if req_vars['type'] == 'mmap':
        dumps[time_delta] = archive.LoadMemMaps(timestamp)
      elif req_vars['type'] == 'nheap':
        dumps[time_delta] = archive.LoadNativeHeap(timestamp)

  # Case 1b: Use a dump recently cached (only mmap, via _DumpMmapsForProcess).
  elif req_vars['source'] == 'cache':
    assert(req_vars['type'] == 'mmap'), 'Only cached mmap dumps are supported.'
    dumps[0] = _GetCacheObject(req_vars['id'])

  if not dumps:
    return _HTTP_GONE, [], 'No memory dumps could be retrieved'

  # Initialize the classifier (mmap or nheap) and prepare symbols for nheap.
  if req_vars['type'] == 'mmap':
    classifier = mmap_classifier
  elif req_vars['type'] == 'nheap':
    classifier = native_heap_classifier
    if not archive.HasSymbols():
      return _HTTP_GONE, [], 'No symbols in archive %s' % req_vars['archive']
    symbols = archive.LoadSymbols()
    for nheap in dumps.itervalues():
      nheap.SymbolizeUsingSymbolDB(symbols)

  if not classifier:
    return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']

  # Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
  if req_vars['ruleset'] == 'heuristic':
    assert(req_vars['type'] == 'nheap'), (
        'heuristic rules are supported only for nheap')
    rules = native_heap_classifier.InferHeuristicRulesFromHeap(dumps[0])
  else:
    rules_path = os.path.join(constants.CLASSIFICATION_RULES_PATH,
                              req_vars['ruleset'])
    if not os.path.isfile(rules_path):
      return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
    with open(rules_path) as f:
      rules = classifier.LoadRules(f.read())

  # Step 3: Aggregate the dump data using the classifier and generate the
  # profile data (which will be kept cached here in the server).
  # The resulting profile will consist of 1+ snapshots (depending on the number
  # of dumps the client has requested to process) and 1+ metrics (depending on
  # the buckets' keys returned by the classifier).

  # Convert the {time: dump_obj} dict into a {time: |AggregatedResult|} dict
  # using the classifier.
  snapshots = collections.OrderedDict((time, classifier.Classify(dump, rules))
      for time, dump in sorted(dumps.iteritems()))

  # Add the profile to the cache (and eventually discard old items).
  # |profile_id| is the key that the client will use in subsequent requests
  # (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
  profile_id = _CacheObject(snapshots)

  first_snapshot = next(snapshots.itervalues())
  return _HTTP_OK, [], {'id': profile_id,
                        'times': snapshots.keys(),
                        'metrics': first_snapshot.keys,
                        'rootBucket': first_snapshot.total.name + '/'}


@AjaxHandler(r'/ajax/profile/(\w+)/tree/(\d+)/(\d+)')
def _GetProfileTreeDataForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the tree chart for a given time and metric.

  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  time = int(args[2])
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if time not in snapshots:
    return _HTTP_GONE, [], 'Cannot find snapshot at T=%d.' % time
  snapshot = snapshots[time]
  if metric_index >= len(snapshot.keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index

  resp = {'cols': [{'label': 'bucket', 'type': 'string'},
                   {'label': 'parent', 'type': 'string'}],
          'rows': []}

  def VisitBucketAndAddRows(bucket, parent_id=''):
    """Recursively adds a (node, parent) row per |ResultTree| bucket, in DFS."""
    node_id = parent_id + bucket.name + '/'
    node_label = '<dl><dt>%s</dt><dd>%s</dd></dl>' % (
        bucket.name, _StrMem(bucket.values[metric_index]))
    resp['rows'] += [{'c': [
        {'v': node_id, 'f': node_label},
        {'v': parent_id, 'f': None},
    ]}]
    for child in bucket.children:
      VisitBucketAndAddRows(child, node_id)

  VisitBucketAndAddRows(snapshot.total)
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/profile/(\w+)/time_serie/(\d+)/(.*)$')
def _GetTimeSerieForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the area chart for a given metric and bucket.

  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  bucket_path = args[2]
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if metric_index >= len(next(snapshots.itervalues()).keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index

  def FindBucketByPath(bucket, path, parent_path=''):  # Essentially a DFS.
    cur_path = parent_path + bucket.name + '/'
    if cur_path == path:
      return bucket
    for child in bucket.children:
      res = FindBucketByPath(child, path, cur_path)
      if res:
        return res
    return None

  # The resulting data table will look like this (assuming len(metrics) == 2):
  # Time  Ashmem      Dalvik       Other
  # 0     (1024,0)    (4096,1024)  (0,0)
  # 30    (512,512)   (1024,1024)  (0,512)
  # 60    (0,512)     (1024,0)     (512,0)
  resp = {'cols': [], 'rows': []}
  for time, aggregated_result in snapshots.iteritems():
    bucket = FindBucketByPath(aggregated_result.total, bucket_path)
    if not bucket:
      return _HTTP_GONE, [], 'Bucket %s not found' % bucket_path

    # If the user selected a non-leaf bucket, display the breakdown of its
    # direct children. Otherwise just the leaf bucket.
    children_buckets = bucket.children if bucket.children else [bucket]

    # Create the columns (from the buckets) when processing the first snapshot.
    if not resp['cols']:
      resp['cols'] += [{'label': 'Time', 'type': 'string'}]
      for child_bucket in children_buckets:
        resp['cols'] += [{'label': child_bucket.name, 'type': 'number'}]

    row = [{'v': str(time), 'f': None}]
    for child_bucket in children_buckets:
      row += [{'v': child_bucket.values[metric_index] / 1024, 'f': None}]
    resp['rows'] += [{'c': row}]

  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/profile/rules')
def _ListProfilingRules(args, req_vars):  # pylint: disable=W0613
  """Lists the classification rule files available for profiling."""
  rules = glob.glob(constants.CLASSIFICATION_RULES_PATH +
                    os.sep + '*' + os.sep + '*.py')
  rules = [x.replace(constants.CLASSIFICATION_RULES_PATH, '')[1:]  # Strip /.
           for x in rules]
  resp = {'mmap': filter(lambda x: 'mmap-' in x, rules),
          'nheap': filter(lambda x: 'nheap-' in x, rules)}
  resp['nheap'].insert(0, 'heuristic')
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/ps/(\w+)/(\w+)$')  # /ajax/ps/Android/a0b1c2[?all=1]
def _ListProcesses(args, req_vars):  # pylint: disable=W0613
  """Lists processes and their CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
  """
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  resp = {
      'cols': [
          {'label': 'Pid', 'type': 'number'},
          {'label': 'Name', 'type': 'string'},
          {'label': 'Cpu %', 'type': 'number'},
          {'label': 'Mem RSS Kb', 'type': 'number'},
          {'label': '# Threads', 'type': 'number'},
      ],
      'rows': []}
  for process in device.ListProcesses():
    # Exclude system apps if the request didn't contain the ?all=1 arg.
    if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
      continue
    stats = process.GetStats()
    resp['rows'] += [{'c': [
        {'v': process.pid, 'f': None},
        {'v': process.name, 'f': None},
        {'v': stats.cpu_usage, 'f': None},
        {'v': stats.vm_rss, 'f': None},
        {'v': stats.threads, 'f': None},
    ]}]
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)$')  # /ajax/stats/Android/a0b1c2
def _GetDeviceStats(args, req_vars):  # pylint: disable=W0613
  """Lists device CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
397 """ 398 device = _GetDevice(args) 399 if not device: 400 return _HTTP_GONE, [], 'Device not found' 401 device_stats = device.GetStats() 402 403 cpu_stats = { 404 'cols': [ 405 {'label': 'CPU', 'type':'string'}, 406 {'label': 'Usr %', 'type':'number'}, 407 {'label': 'Sys %', 'type':'number'}, 408 {'label': 'Idle %', 'type':'number'}, 409 ], 410 'rows': []} 411 412 for cpu_idx in xrange(len(device_stats.cpu_times)): 413 cpu = device_stats.cpu_times[cpu_idx] 414 cpu_stats['rows'] += [{'c': [ 415 {'v': '# %d' % cpu_idx, 'f': None}, 416 {'v': cpu['usr'], 'f': None}, 417 {'v': cpu['sys'], 'f': None}, 418 {'v': cpu['idle'], 'f': None}, 419 ]}] 420 421 mem_stats = { 422 'cols': [ 423 {'label': 'Section', 'type':'string'}, 424 {'label': 'MB', 'type':'number', 'pattern': ''}, 425 ], 426 'rows': []} 427 428 for key, value in device_stats.memory_stats.iteritems(): 429 mem_stats['rows'] += [{'c': [ 430 {'v': key, 'f': None}, 431 {'v': value / 1024, 'f': None} 432 ]}] 433 434 return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats} 435 436 437@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)/(\d+)$') # /ajax/stats/Android/a0b1c2/42 438def _GetProcessStats(args, req_vars): # pylint: disable=W0613 439 """Lists CPU / mem stats for a given process (and keeps history). 440 441 The response is formatted according to the Google Charts DataTable format. 442 """ 443 process = _GetProcess(args) 444 if not process: 445 return _HTTP_GONE, [], 'Device not found' 446 447 proc_uri = '/'.join(args) 448 cur_stats = process.GetStats() 449 if proc_uri not in _proc_stats_history: 450 _proc_stats_history[proc_uri] = collections.deque(maxlen=_STATS_HIST_SIZE) 451 history = _proc_stats_history[proc_uri] 452 history.append(cur_stats) 453 454 cpu_stats = { 455 'cols': [ 456 {'label': 'T', 'type':'string'}, 457 {'label': 'CPU %', 'type':'number'}, 458 {'label': '# Threads', 'type':'number'}, 459 ], 460 'rows': [] 461 } 462 463 mem_stats = { 464 'cols': [ 465 {'label': 'T', 'type':'string'}, 466 {'label': 'Mem RSS Kb', 'type':'number'}, 467 {'label': 'Page faults', 'type':'number'}, 468 ], 469 'rows': [] 470 } 471 472 for stats in history: 473 cpu_stats['rows'] += [{'c': [ 474 {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None}, 475 {'v': stats.cpu_usage, 'f': None}, 476 {'v': stats.threads, 'f': None}, 477 ]}] 478 mem_stats['rows'] += [{'c': [ 479 {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None}, 480 {'v': stats.vm_rss, 'f': None}, 481 {'v': stats.page_faults, 'f': None}, 482 ]}] 483 484 return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats} 485 486 487@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$') # /ajax/settings/Android[/id] 488def _GetDeviceOrBackendSettings(args, req_vars): # pylint: disable=W0613 489 backend = backends.GetBackend(args[0]) 490 if not backend: 491 return _HTTP_GONE, [], 'Backend not found' 492 if args[1]: 493 device = _GetDevice(args) 494 if not device: 495 return _HTTP_GONE, [], 'Device not found' 496 settings = device.settings 497 else: 498 settings = backend.settings 499 500 assert(isinstance(settings, backends.Settings)) 501 resp = {} 502 for key in settings.expected_keys: 503 resp[key] = {'description': settings.expected_keys[key], 504 'value': settings.values[key]} 505 return _HTTP_OK, [], resp 506 507 508@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$', 'POST') 509def _SetDeviceOrBackendSettings(args, req_vars): # pylint: disable=W0613 510 backend = backends.GetBackend(args[0]) 511 if not backend: 512 return _HTTP_GONE, [], 'Backend not found' 513 if args[1]: 514 device = 
    if not device:
      return _HTTP_GONE, [], 'Device not found'
    settings = device.settings
    storage_name = device.id
  else:
    settings = backend.settings
    storage_name = backend.name

  for key in req_vars.iterkeys():
    settings[key] = req_vars[key]
  _persistent_storage.StoreSettings(storage_name, settings.values)
  return _HTTP_OK, [], ''


@AjaxHandler(r'/ajax/storage/list')
def _ListStorage(args, req_vars):  # pylint: disable=W0613
  resp = {
      'cols': [
          {'label': 'Archive', 'type': 'string'},
          {'label': 'Snapshot', 'type': 'string'},
          {'label': 'Mem maps', 'type': 'boolean'},
          {'label': 'N. Heap', 'type': 'boolean'},
      ],
      'rows': []}
  for archive_name in _persistent_storage.ListArchives():
    archive = _persistent_storage.OpenArchive(archive_name)
    first_timestamp = None
    for timestamp in archive.ListSnapshots():
      first_timestamp = first_timestamp or timestamp
      time_delta = '%d s.' % (timestamp - first_timestamp).total_seconds()
      resp['rows'] += [{'c': [
          {'v': archive_name, 'f': None},
          {'v': timestamp.isoformat(), 'f': time_delta},
          {'v': archive.HasMemMaps(timestamp), 'f': None},
          {'v': archive.HasNativeHeap(timestamp), 'f': None},
      ]}]
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/storage/(.+)/(.+)/mmaps')
def _LoadMmapsFromStorage(args, req_vars):  # pylint: disable=W0613
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]

  timestamp = dateutil.parser.parse(args[1])
  if not archive.HasMemMaps(timestamp):
    return _HTTP_GONE, [], 'No mmaps for snapshot %s' % timestamp
  mmap = archive.LoadMemMaps(timestamp)
  return _HTTP_OK, [], {'table': _ConvertMmapToGTable(mmap)}


@AjaxHandler(r'/ajax/storage/(.+)/(.+)/nheap')
def _LoadNheapFromStorage(args, req_vars):  # pylint: disable=W0613
  """Returns a Google Charts DataTable dictionary for the nheap."""
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]

  timestamp = dateutil.parser.parse(args[1])
  if not archive.HasNativeHeap(timestamp):
    return _HTTP_GONE, [], 'No native heap dump for snapshot %s' % timestamp

  nheap = archive.LoadNativeHeap(timestamp)
  symbols = archive.LoadSymbols()
  nheap.SymbolizeUsingSymbolDB(symbols)

  resp = {
      'cols': [
          {'label': 'Total size [KB]', 'type': 'number'},
          {'label': 'Alloc size [B]', 'type': 'number'},
          {'label': 'Count', 'type': 'number'},
          {'label': 'Stack Trace', 'type': 'string'},
      ],
      'rows': []}
  for alloc in nheap.allocations:
    strace = '<dl>'
    for frame in alloc.stack_trace.frames:
      # Use the fallback libname.so+0xaddr if symbol info is not available.
      symbol_name = frame.symbol.name if frame.symbol else '??'
      source_info = (str(frame.symbol.source_info[0])
                     if frame.symbol and frame.symbol.source_info
                     else frame.raw_address)
      strace += '<dd title="%s">%s</dd><dt>%s</dt>' % (
          cgi.escape(source_info),
          cgi.escape(os.path.basename(source_info)),
          cgi.escape(symbol_name))
    strace += '</dl>'

    resp['rows'] += [{'c': [
        {'v': alloc.total_size, 'f': alloc.total_size / 1024},
        {'v': alloc.size, 'f': None},
        {'v': alloc.count, 'f': None},
        {'v': strace, 'f': None},
    ]}]
  return _HTTP_OK, [], resp


# /ajax/tracer/start/Android/device-id/pid
@AjaxHandler(r'/ajax/tracer/start/(\w+)/(\w+)/(\d+)', 'POST')
def _StartTracer(args, req_vars):
  for arg in 'interval', 'count', 'traceNativeHeap':
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  task_id = background_tasks.StartTracer(
      storage_path=_PERSISTENT_STORAGE_PATH,
      process=process,
      interval=int(req_vars['interval']),
      count=int(req_vars['count']),
      trace_native_heap=req_vars['traceNativeHeap'])
  return _HTTP_OK, [], task_id


@AjaxHandler(r'/ajax/tracer/status/(\d+)')  # /ajax/tracer/status/{task_id}
def _GetTracerStatus(args, req_vars):  # pylint: disable=W0613
  task = background_tasks.Get(int(args[0]))
  if not task:
    return _HTTP_GONE, [], 'Task not found'
  return _HTTP_OK, [], task.GetProgress()


@UriHandler(r'^(?!/ajax)/(.*)$')
def _StaticContent(args, req_vars):  # pylint: disable=W0613
  # Give the browser a 1-day TTL cache to minimize the start-up time.
  cache_headers = [('Cache-Control', 'max-age=86400, public')]
  req_path = args[0] if args[0] else 'index.html'
  file_path = os.path.abspath(os.path.join(_CONTENT_DIR, req_path))
  if (os.path.isfile(file_path) and
      os.path.commonprefix([file_path, _CONTENT_DIR]) == _CONTENT_DIR):
    mtype = 'text/plain'
    guessed_mime = mimetypes.guess_type(file_path)
    if guessed_mime and guessed_mime[0]:
      mtype = guessed_mime[0]
    with open(file_path, 'rb') as f:
      body = f.read()
    return _HTTP_OK, cache_headers + [('Content-Type', mtype)], body
  return _HTTP_NOT_FOUND, cache_headers, file_path + ' not found'


def _GetDevice(args):
  """Returns a |backends.Device| instance from a /backend/device URI."""
  assert(len(args) >= 2), 'Malformed request. Expecting /backend/device'
  return backends.GetDevice(backend_name=args[0], device_id=args[1])


def _GetProcess(args):
  """Returns a |backends.Process| instance from a /backend/device/pid URI."""
  assert(len(args) >= 3 and args[2].isdigit()), (
      'Malformed request. Expecting /backend/device/pid')
  device = _GetDevice(args)
  if not device:
    return None
  return device.GetProcess(int(args[2]))


def _ConvertMmapToGTable(mmap):
  """Returns a Google Charts DataTable dictionary for the given mmap."""
  assert(isinstance(mmap, memory_map.Map))
  table = {
      'cols': [
          {'label': 'Start', 'type': 'string'},
          {'label': 'End', 'type': 'string'},
          {'label': 'Length Kb', 'type': 'number'},
          {'label': 'Prot', 'type': 'string'},
          {'label': 'Priv. Dirty Kb', 'type': 'number'},
          {'label': 'Priv. Clean Kb', 'type': 'number'},
          {'label': 'Shared Dirty Kb', 'type': 'number'},
          {'label': 'Shared Clean Kb', 'type': 'number'},
          {'label': 'File', 'type': 'string'},
          {'label': 'Offset', 'type': 'number'},
          {'label': 'Resident Pages', 'type': 'string'},
      ],
      'rows': []}
  for entry in mmap.entries:
    table['rows'] += [{'c': [
        {'v': '%08x' % entry.start, 'f': None},
        {'v': '%08x' % entry.end, 'f': None},
        {'v': entry.len / 1024, 'f': None},
        {'v': entry.prot_flags, 'f': None},
        {'v': entry.priv_dirty_bytes / 1024, 'f': None},
        {'v': entry.priv_clean_bytes / 1024, 'f': None},
        {'v': entry.shared_dirty_bytes / 1024, 'f': None},
        {'v': entry.shared_clean_bytes / 1024, 'f': None},
        {'v': entry.mapped_file, 'f': None},
        {'v': entry.mapped_offset, 'f': None},
        {'v': '[%s]' % (','.join(map(str, entry.resident_pages))), 'f': None},
    ]}]
  return table


def _CacheObject(obj_to_store):
  """Stores an object in the server-side cache and returns its unique id."""
  if len(_cached_objs) >= _CACHE_LEN:
    _cached_objs.popitem(last=False)
  obj_id = uuid.uuid4().hex
  _cached_objs[obj_id] = obj_to_store
  return obj_id


def _GetCacheObject(obj_id):
  """Retrieves an object from the server-side cache by its id."""
  return _cached_objs.get(obj_id)


def _StrMem(nbytes):
  """Converts a number (of bytes) into a human readable string (KB, MB)."""
  if nbytes < 2**10:
    return '%d B' % nbytes
  if nbytes < 2**20:
    return '%.1f KB' % (nbytes / 1024.0)
  return '%.1f MB' % (nbytes / 1048576.0)


def _HttpRequestHandler(environ, start_response):
  """Parses a single HTTP request and delegates its handling to UriHandler.

  This essentially wires up wsgiref.simple_server with our @UriHandler(s).
  """
  path = environ['PATH_INFO']
  method = environ['REQUEST_METHOD']
  if method == 'POST':
    req_body_size = int(environ.get('CONTENT_LENGTH', 0))
    req_body = environ['wsgi.input'].read(req_body_size)
    req_vars = json.loads(req_body)
  else:
    req_vars = urlparse.parse_qs(environ['QUERY_STRING'])
  (http_code, headers, body) = UriHandler.Handle(method, path, req_vars)
  start_response(http_code, headers)
  return [body]


def Start(http_port):
  # Load the saved backends' settings (some of them might be needed to
  # bootstrap, e.g., the adb path for the Android backend).
  memory_inspector.RegisterAllBackends()
  for backend in backends.ListBackends():
    for k, v in _persistent_storage.LoadSettings(backend.name).iteritems():
      backend.settings[k] = v

  httpd = wsgiref.simple_server.make_server('', http_port, _HttpRequestHandler)
  try:
    httpd.serve_forever()
  except KeyboardInterrupt:
    pass  # Don't print useless stack traces when the user hits CTRL-C.
  background_tasks.TerminateAll()
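

if __name__ == '__main__':
  # Illustrative sketch only: this module is expected to be imported by the
  # memory_inspector launcher, which calls Start() with its own port. The port
  # below is an arbitrary example, not a documented default.
  Start(8089)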