// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import '../tracks/all_controller';

import * as uuidv4 from 'uuid/v4';

import {assertExists, assertTrue} from '../base/logging';
import {
  Actions,
  DeferredAction,
} from '../common/actions';
import {Engine} from '../common/engine';
import {NUM, NUM_NULL, rawQueryToRows, STR_NULL} from '../common/protos';
import {SCROLLING_TRACK_GROUP} from '../common/state';
import {TimeSpan} from '../common/time';
import {QuantizedLoad, ThreadDesc} from '../frontend/globals';
import {ANDROID_LOGS_TRACK_KIND} from '../tracks/android_log/common';
import {SLICE_TRACK_KIND} from '../tracks/chrome_slices/common';
import {CPU_FREQ_TRACK_KIND} from '../tracks/cpu_freq/common';
import {CPU_SLICE_TRACK_KIND} from '../tracks/cpu_slices/common';
import {
  PROCESS_SCHEDULING_TRACK_KIND
} from '../tracks/process_scheduling/common';
import {PROCESS_SUMMARY_TRACK} from '../tracks/process_summary/common';
import {THREAD_STATE_TRACK_KIND} from '../tracks/thread_state/common';

import {Child, Children, Controller} from './controller';
import {globals} from './globals';
import {LogsController} from './logs_controller';
import {QueryController, QueryControllerArgs} from './query_controller';
import {
  SelectionController,
  SelectionControllerArgs
} from './selection_controller';
import {TrackControllerArgs, trackControllerRegistry} from './track_controller';

type States = 'init'|'loading_trace'|'ready';


declare interface FileReaderSync { readAsArrayBuffer(blob: Blob): ArrayBuffer; }

declare var FileReaderSync:
    {prototype: FileReaderSync; new (): FileReaderSync;};

// TraceController handles handshakes with the frontend for everything that
// concerns a single trace. It owns the WASM trace processor engine and
// handles track data and SQL queries. There is one TraceController instance
// for each trace opened in the UI (for now only one trace is supported).
export class TraceController extends Controller<States> {
  private readonly engineId: string;
  private engine?: Engine;

  constructor(engineId: string) {
    super('init');
    this.engineId = engineId;
  }

  onDestroy() {
    if (this.engine !== undefined) globals.destroyEngine(this.engine.id);
  }

  run() {
    const engineCfg = assertExists(globals.state.engines[this.engineId]);
    switch (this.state) {
      case 'init':
        globals.dispatch(Actions.setEngineReady({
          engineId: this.engineId,
          ready: false,
        }));
        this.loadTrace().then(() => {
          globals.dispatch(Actions.setEngineReady({
            engineId: this.engineId,
            ready: true,
          }));
        });
        this.updateStatus('Opening trace');
        this.setState('loading_trace');
        break;

      case 'loading_trace':
        // Stay in this state until loadTrace() returns and marks the engine as
        // ready.
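        // engineCfg.ready is flipped by the .then() chained onto loadTrace()
        // in the 'init' case above, while this.engine is assigned inside
        // loadTrace() once globals.createEngine() has returned.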
        if (this.engine === undefined || !engineCfg.ready) return;
        this.setState('ready');
        break;

      case 'ready':
        // At this point we are ready to serve queries and handle tracks.
        const engine = assertExists(this.engine);
        assertTrue(engineCfg.ready);
        const childControllers: Children = [];

        // Create a TrackController for each track.
        for (const trackId of Object.keys(globals.state.tracks)) {
          const trackCfg = globals.state.tracks[trackId];
          if (trackCfg.engineId !== this.engineId) continue;
          if (!trackControllerRegistry.has(trackCfg.kind)) continue;
          const trackCtlFactory = trackControllerRegistry.get(trackCfg.kind);
          const trackArgs: TrackControllerArgs = {trackId, engine};
          childControllers.push(Child(trackId, trackCtlFactory, trackArgs));
        }

        // Create a QueryController for each query.
        for (const queryId of Object.keys(globals.state.queries)) {
          const queryArgs: QueryControllerArgs = {queryId, engine};
          childControllers.push(Child(queryId, QueryController, queryArgs));
        }

        const selectionArgs: SelectionControllerArgs = {engine};
        childControllers.push(
            Child('selection', SelectionController, selectionArgs));

        childControllers.push(Child('logs', LogsController, {
          engine,
          app: globals,
        }));

        return childControllers;

      default:
        throw new Error(`unknown state ${this.state}`);
    }
    return;
  }

  private async loadTrace() {
    this.updateStatus('Creating trace processor');
    const engineCfg = assertExists(globals.state.engines[this.engineId]);
    this.engine = globals.createEngine();

    const statusHeader = 'Opening trace';
    if (engineCfg.source instanceof File) {
      const blob = engineCfg.source as Blob;
      const reader = new FileReaderSync();
      const SLICE_SIZE = 1024 * 1024;
      for (let off = 0; off < blob.size; off += SLICE_SIZE) {
        const slice = blob.slice(off, off + SLICE_SIZE);
        const arrBuf = reader.readAsArrayBuffer(slice);
        await this.engine.parse(new Uint8Array(arrBuf));
        const progress = Math.round((off + slice.size) / blob.size * 100);
        this.updateStatus(`${statusHeader} ${progress} %`);
      }
    } else {
      const resp = await fetch(engineCfg.source);
      if (resp.status !== 200) {
        this.updateStatus(`HTTP error ${resp.status}`);
        throw new Error(`fetch() failed with HTTP error ${resp.status}`);
      }
      // tslint:disable-next-line no-any
      const rd = (resp.body as any).getReader() as ReadableStreamReader;
      const tStartMs = performance.now();
      let tLastUpdateMs = 0;
      for (let off = 0;;) {
        const readRes = await rd.read() as {value: Uint8Array, done: boolean};
        if (readRes.value !== undefined) {
          off += readRes.value.length;
          await this.engine.parse(readRes.value);
        }
        // For traces loaded from the network there doesn't seem to be a
        // reliable way to compute the %. The content-length exposed by GCS is
        // before compression (which is handled transparently by the browser).
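        // Throttle status updates to at most one every ~100 ms so the state
        // store is not flooded with updateStatus dispatches while streaming.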
        const nowMs = performance.now();
        if (nowMs - tLastUpdateMs > 100) {
          tLastUpdateMs = nowMs;
          const mb = off / 1e6;
          const tElapsed = (nowMs - tStartMs) / 1e3;
          let status = `${statusHeader} ${mb.toFixed(1)} MB `;
          status += `(${(mb / tElapsed).toFixed(1)} MB/s)`;
          this.updateStatus(status);
        }
        if (readRes.done) break;
      }
    }

    await this.engine.notifyEof();

    const traceTime = await this.engine.getTraceTimeBounds();
    const traceTimeState = {
      startSec: traceTime.start,
      endSec: traceTime.end,
    };
    const actions: DeferredAction[] = [
      Actions.setTraceTime(traceTimeState),
      Actions.navigate({route: '/viewer'}),
    ];

    if (globals.state.frontendLocalState.lastUpdate === 0) {
      actions.push(Actions.setVisibleTraceTime({
        time: traceTimeState,
        lastUpdate: Date.now() / 1000,
      }));
    }

    globals.dispatchMultiple(actions);

    {
      // When we reload from a permalink, don't create extra tracks:
      const {pinnedTracks, scrollingTracks} = globals.state;
      if (!pinnedTracks.length && !scrollingTracks.length) {
        await this.listTracks();
      }
    }

    await this.listThreads();
    await this.loadTimelineOverview(traceTime);
  }

  private async listTracks() {
    this.updateStatus('Loading tracks');

    const engine = assertExists<Engine>(this.engine);
    const addToTrackActions: DeferredAction[] = [];
    const numCpus = await engine.getNumberOfCpus();

    // TODO(hjd): Re-enable Vsync tracks when fixed.
    //// TODO(hjd): Move this code out of TraceController.
    // for (const counterName of ['VSYNC-sf', 'VSYNC-app']) {
    //   const hasVsync =
    //       !!(await engine.query(
    //              `select ts from counters where name like "${
    //                                                          counterName
    //                                                        }" limit 1`))
    //           .numRecords;
    //   if (!hasVsync) continue;
    //   addToTrackActions.push(Actions.addTrack({
    //     engineId: this.engineId,
    //     kind: 'VsyncTrack',
    //     name: `${counterName}`,
    //     config: {
    //       counterName,
    //     }
    //   }));
    // }
    const maxFreq = await engine.query(`
      select max(value)
      from counters
      where name = 'cpufreq';
    `);

    for (let cpu = 0; cpu < numCpus; cpu++) {
      addToTrackActions.push(Actions.addTrack({
        engineId: this.engineId,
        kind: CPU_SLICE_TRACK_KIND,
        name: `Cpu ${cpu}`,
        trackGroup: SCROLLING_TRACK_GROUP,
        config: {
          cpu,
        }
      }));

      // Only add a cpu freq track if we have cpu freq data.
      // TODO(taylori): Find a way to display cpu idle
      // events even if there are no cpu freq events.
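      // The global maxFreq queried above is used as maximumValue for every
      // cpu freq track, so they all share the same scale.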
      const freqExists = await engine.query(`
        select value
        from counters
        where name = 'cpufreq' and ref = ${cpu}
        limit 1;
      `);
      if (freqExists.numRecords > 0) {
        addToTrackActions.push(Actions.addTrack({
          engineId: this.engineId,
          kind: CPU_FREQ_TRACK_KIND,
          name: `Cpu ${cpu} Frequency`,
          trackGroup: SCROLLING_TRACK_GROUP,
          config: {
            cpu,
            maximumValue: +maxFreq.columns[0].doubleValues![0],
          }
        }));
      }
    }

    const counters = await engine.query(`
      select name, ref, ref_type, count(ref_type)
      from counter_definitions
      where ref is not null
      group by name, ref, ref_type
      order by ref_type desc
    `);
    const counterUpids = new Set<number>();
    const counterUtids = new Set<number>();
    for (let i = 0; i < counters.numRecords; i++) {
      const ref = +counters.columns[1].longValues![i];
      const refType = counters.columns[2].stringValues![i];
      if (refType === 'upid') counterUpids.add(ref);
      if (refType === 'utid') counterUtids.add(ref);
    }

    // Add all the global counter tracks that are not bound to any pid/tid,
    // the ones for which refType == NULL.
    for (let i = 0; i < counters.numRecords; i++) {
      const name = counters.columns[0].stringValues![i];
      const refType = counters.columns[2].stringValues![i];
      if (refType !== '[NULL]') continue;
      addToTrackActions.push(Actions.addTrack({
        engineId: this.engineId,
        kind: 'CounterTrack',
        name,
        trackGroup: SCROLLING_TRACK_GROUP,
        config: {
          name,
          ref: 0,
        }
      }));
    }

    // Local experiments show that getting maxDepth separately is ~2x faster
    // than joining with threads and processes.
    const maxDepthQuery =
        await engine.query('select utid, max(depth) from slices group by utid');

    const utidToMaxDepth = new Map<number, number>();
    for (let i = 0; i < maxDepthQuery.numRecords; i++) {
      const utid = maxDepthQuery.columns[0].longValues![i] as number;
      const maxDepth = maxDepthQuery.columns[1].longValues![i] as number;
      utidToMaxDepth.set(utid, maxDepth);
    }

    // Return all threads
    // sorted by:
    //  total cpu time *for the whole parent process*
    //  upid
    //  utid
    const threadQuery = await engine.query(`
        select
          utid,
          tid,
          upid,
          pid,
          thread.name as threadName,
          process.name as processName,
          total_dur as totalDur
        from
          thread
          left join process using(upid)
          left join (select upid, sum(dur) as total_dur
              from sched join thread using(utid)
              group by upid
            ) using(upid) group by utid, upid
        order by total_dur desc, upid, utid`);

    const upidToUuid = new Map<number, string>();
    const utidToUuid = new Map<number, string>();
    const addSummaryTrackActions: DeferredAction[] = [];
    const addTrackGroupActions: DeferredAction[] = [];

    for (const row of rawQueryToRows(threadQuery, {
           utid: NUM,
           upid: NUM_NULL,
           tid: NUM_NULL,
           pid: NUM_NULL,
           threadName: STR_NULL,
           processName: STR_NULL,
           totalDur: NUM_NULL,
         })) {
      const utid = row.utid;
      const tid = row.tid;
      const upid = row.upid;
      const pid = row.pid;
      const threadName = row.threadName;
      const processName = row.processName;
      const hasSchedEvents = !!row.totalDur;
      const threadSched =
          await engine.query(`select count(1) from sched where utid = ${utid}`);
      const threadHasSched = threadSched.columns[0].longValues![0] > 0;

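      // Skip threads that have no slices, no utid- or upid-scoped counters
      // and no sched activity: they would only contribute empty tracks.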
      const maxDepth = utid === null ? undefined : utidToMaxDepth.get(utid);
      if (maxDepth === undefined &&
          (upid === null || !counterUpids.has(upid)) &&
          !counterUtids.has(utid) && !threadHasSched) {
        continue;
      }

      // Group by upid if present else by utid.
      let pUuid = upid === null ? utidToUuid.get(utid) : upidToUuid.get(upid);
      if (pUuid === undefined) {
        pUuid = uuidv4();
        const summaryTrackId = uuidv4();
        if (upid === null) {
          utidToUuid.set(utid, pUuid);
        } else {
          upidToUuid.set(upid, pUuid);
        }

        const pidForColor = pid || tid || upid || utid || 0;
        const kind = hasSchedEvents ? PROCESS_SCHEDULING_TRACK_KIND :
                                      PROCESS_SUMMARY_TRACK;
        addSummaryTrackActions.push(Actions.addTrack({
          id: summaryTrackId,
          engineId: this.engineId,
          kind,
          name: `${upid === null ? tid : pid} summary`,
          config: {pidForColor, upid, utid},
        }));

        addTrackGroupActions.push(Actions.addTrackGroup({
          engineId: this.engineId,
          summaryTrackId,
          name: upid === null ? `${threadName} ${tid}` :
                                `${processName} ${pid}`,
          id: pUuid,
          collapsed: true,
        }));

        for (let i = 0; i < counters.numRecords; i++) {
          const name = counters.columns[0].stringValues![i];
          const ref = counters.columns[1].longValues![i];
          const refType = counters.columns[2].stringValues![i];
          if (refType !== 'upid' || ref !== upid) continue;
          addTrackGroupActions.push(Actions.addTrack({
            engineId: this.engineId,
            kind: 'CounterTrack',
            name,
            trackGroup: pUuid,
            config: {
              name,
              ref,
            }
          }));
        }
      }

      for (let i = 0; i < counters.numRecords; i++) {
        const name = counters.columns[0].stringValues![i];
        const ref = counters.columns[1].longValues![i];
        const refType = counters.columns[2].stringValues![i];

        if (refType !== 'utid' || ref !== utid) continue;
        addTrackGroupActions.push(Actions.addTrack({
          engineId: this.engineId,
          kind: 'CounterTrack',
          name,
          trackGroup: pUuid,
          config: {
            name,
            ref,
          }
        }));
      }

      if (threadHasSched) {
        addToTrackActions.push(Actions.addTrack({
          engineId: this.engineId,
          kind: THREAD_STATE_TRACK_KIND,
          name: `${threadName} [${tid}]`,
          trackGroup: pUuid,
          config: {utid}
        }));
      }

      if (maxDepth !== undefined) {
        addToTrackActions.push(Actions.addTrack({
          engineId: this.engineId,
          kind: SLICE_TRACK_KIND,
          name: `${threadName} [${tid}]`,
          trackGroup: pUuid,
          config: {upid, utid, maxDepth},
        }));
      }
    }

    const logCount = await engine.query(`select count(1) from android_logs`);
    if (logCount.columns[0].longValues![0] > 0) {
      addToTrackActions.push(Actions.addTrack({
        engineId: this.engineId,
        kind: ANDROID_LOGS_TRACK_KIND,
        name: 'Android logs',
        trackGroup: SCROLLING_TRACK_GROUP,
        config: {}
      }));
    }

    const allActions =
        addSummaryTrackActions.concat(addTrackGroupActions, addToTrackActions);
    globals.dispatchMultiple(allActions);
  }

  private async listThreads() {
    this.updateStatus('Reading thread list');
    const sqlQuery = `select utid, tid, pid, thread.name,
        ifnull(
          case when length(process.name) > 0 then process.name else null end,
          thread.name)
        from thread left join process using(upid)`;
    const threadRows = await assertExists(this.engine).query(sqlQuery);
    const threads: ThreadDesc[] = [];
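    // Convert each row into a ThreadDesc; the complete list is published to
    // the frontend below.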
    for (let i = 0; i < threadRows.numRecords; i++) {
      const utid = threadRows.columns[0].longValues![i] as number;
      const tid = threadRows.columns[1].longValues![i] as number;
      const pid = threadRows.columns[2].longValues![i] as number;
      const threadName = threadRows.columns[3].stringValues![i];
      const procName = threadRows.columns[4].stringValues![i];
      threads.push({utid, tid, threadName, pid, procName});
    }  // for (record ...)
    globals.publish('Threads', threads);
  }

  private async loadTimelineOverview(traceTime: TimeSpan) {
    const engine = assertExists<Engine>(this.engine);
    const numSteps = 100;
    const stepSec = traceTime.duration / numSteps;
    let hasSchedOverview = false;
    for (let step = 0; step < numSteps; step++) {
      this.updateStatus(
          'Loading overview ' +
          `${Math.round((step + 1) / numSteps * 1000) / 10}%`);
      const startSec = traceTime.start + step * stepSec;
      const startNs = Math.floor(startSec * 1e9);
      const endSec = startSec + stepSec;
      const endNs = Math.ceil(endSec * 1e9);

      // Sched overview.
      const schedRows = await engine.query(
          `select sum(dur)/${stepSec}/1e9, cpu from sched ` +
          `where ts >= ${startNs} and ts < ${endNs} and utid != 0 ` +
          'group by cpu order by cpu');
      const schedData: {[key: string]: QuantizedLoad} = {};
      for (let i = 0; i < schedRows.numRecords; i++) {
        const load = schedRows.columns[0].doubleValues![i];
        const cpu = schedRows.columns[1].longValues![i] as number;
        schedData[cpu] = {startSec, endSec, load};
        hasSchedOverview = true;
      }  // for (record ...)
      globals.publish('OverviewData', schedData);
    }  // for (step ...)

    if (hasSchedOverview) {
      return;
    }

    // Slices overview.
    const traceStartNs = traceTime.start * 1e9;
    const stepSecNs = stepSec * 1e9;
    const sliceSummaryQuery = await engine.query(
        `select bucket, upid, sum(utid_sum) / cast(${stepSecNs} as float) ` +
        `as upid_sum from thread inner join ` +
        `(select cast((ts - ${traceStartNs})/${stepSecNs} as int) as bucket, ` +
        `sum(dur) as utid_sum, utid from slices group by bucket, utid) ` +
        `using(utid) group by bucket, upid`);

    const slicesData: {[key: string]: QuantizedLoad[]} = {};
    for (let i = 0; i < sliceSummaryQuery.numRecords; i++) {
      const bucket = sliceSummaryQuery.columns[0].longValues![i] as number;
      const upid = sliceSummaryQuery.columns[1].longValues![i] as number;
      const load = sliceSummaryQuery.columns[2].doubleValues![i];

      const startSec = traceTime.start + stepSec * bucket;
      const endSec = startSec + stepSec;

      const upidStr = upid.toString();
      let loadArray = slicesData[upidStr];
      if (loadArray === undefined) {
        loadArray = slicesData[upidStr] = [];
      }
      loadArray.push({startSec, endSec, load});
    }
    globals.publish('OverviewData', slicesData);
  }

  private updateStatus(msg: string): void {
    globals.dispatch(Actions.updateStatus({
      msg,
      timestamp: Date.now() / 1000,
    }));
  }
}