<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8">
    <title>window.performance User Timing measure() method is working properly</title>
    <link rel="author" title="Microsoft" href="http://www.microsoft.com/">
    <link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure">
    <script src="/resources/testharness.js"></script>
    <script src="/resources/testharnessreport.js"></script>
    <script src="/common/performance-timeline-utils.js"></script>
    <script src="resources/webperftestharness.js"></script>

    <script>
      // test data
      var startMarkName = "mark_start";
      var startMarkValue;
      var endMarkName = "mark_end";
      var endMarkValue;
      var measures;
      // maximum allowed difference (in ms) between an observed duration/startTime
      // and its expected value
      var testThreshold = 20;

      // delay (in ms) between creating the start mark and the end mark, so that
      // measures spanning both marks have a clearly non-zero duration
      var measureTestDelay = 200;

      // Each scenario describes one variant of the measure() call. startTime and
      // duration hold the expected values and are filled in once the marks exist;
      // entryMatch caches the entry returned by getEntriesByName() so later
      // queries can be cross-checked against it; found is scratch state used by
      // test_measure_list().
      var TEST_MEASURES =
      [
        {
          name: "measure_no_start_no_end",
          startMark: undefined,
          endMark: undefined,
          startTime: undefined,
          duration: undefined,
          entryType: "measure",
          entryMatch: undefined,
          order: undefined,
          found: false
        },
        {
          name: "measure_start_no_end",
          startMark: "mark_start",
          endMark: undefined,
          startTime: undefined,
          duration: undefined,
          entryType: "measure",
          entryMatch: undefined,
          order: undefined,
          found: false
        },
        {
          name: "measure_start_end",
          startMark: "mark_start",
          endMark: "mark_end",
          startTime: undefined,
          duration: undefined,
          entryType: "measure",
          entryMatch: undefined,
          order: undefined,
          found: false
        },
        {
          name: "measure_no_start_end",
          startMark: undefined,
          endMark: "mark_end",
          startTime: undefined,
          duration: undefined,
          entryType: "measure",
          entryMatch: undefined,
          order: undefined,
          found: false
        },
        // intentional duplicate of the first measure, used to confirm names can be re-used
        {
          name: "measure_no_start_no_end",
          startMark: undefined,
          endMark: undefined,
          startTime: undefined,
          duration: undefined,
          entryType: "measure",
          entryMatch: undefined,
          order: undefined,
          found: false
        }
      ];
      // the index of the duplicate "measure_no_start_no_end"
      const duplicate_index = TEST_MEASURES.map(m => m.name).lastIndexOf('measure_no_start_no_end');

      setup({explicit_done: true});

      test_namespace();

      // Entry point (run from <body onload>): creates the start mark, then
      // schedules measure_test_cb() after measureTestDelay so the end mark is
      // measurably later than the start mark.
      function onload_test()
      {
        // test for existence of User Timing and Performance Timeline interface
        if (!has_required_interfaces())
        {
          test_true(false,
            "The User Timing and Performance Timeline interfaces, which are required for this test, " +
            "are defined.");

          done();
        }
        else
        {
          // create the start mark for the test measures
          window.performance.mark(startMarkName);

          // get the start mark's value
          startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;

          // create the test end mark using the test delay; this will allow for a significant difference
          // between the mark values that should be represented in the duration of measures using these
          // marks
          step_timeout(measure_test_cb, measureTestDelay);
        }
      }

      // Creates the end mark, creates one measure per scenario in TEST_MEASURES
      // (computing the expected startTime/duration for each), then validates the
      // measures through getEntriesByName() (with and without the entryType
      // argument), getEntries(), and getEntriesByType().
      function measure_test_cb()
      {
        // create the end mark for the test measures
        window.performance.mark(endMarkName);

        // get the end mark's value
        endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;

        // loop through all measure scenarios and create the corresponding measures
        for (var i in TEST_MEASURES)
        {
          var scenario = TEST_MEASURES[i];

          if (scenario.startMark == undefined && scenario.endMark == undefined)
          {
            // both startMark and endMark are undefined, don't provide either parameter
            window.performance.measure(scenario.name);

            // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp
            // corresponding to the navigationStart attribute with a timebase of the same attribute
            // is used; this is equivalent to 0
            scenario.startTime = 0;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
            // to the current time with a timebase of the navigationStart attribute is used;
            // performance.now() is exactly that value, on the same monotonic clock the measure uses
            // (the previous (new Date()) - navigationStart mixed the wall clock into the comparison)
            scenario.duration = window.performance.now();
          }
          else if (scenario.startMark != undefined && scenario.endMark == undefined)
          {
            // only startMark is defined, provide startMark and don't provide endMark
            window.performance.measure(scenario.name, scenario.startMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
            // to the current time with a timebase of the navigationStart attribute is used
            scenario.duration = window.performance.now() - startMarkValue;
          }
          else if (scenario.startMark != undefined && scenario.endMark != undefined)
          {
            // both startMark and endMark are defined, provide both parameters
            window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the endMark
            scenario.duration = endMarkValue - startMarkValue;
          }
          else if (scenario.startMark == undefined && scenario.endMark != undefined)
          {
            // endMark is defined but startMark is undefined, provide both parameters
            window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);

            // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp
            // corresponding to the navigationStart attribute with a timebase of the same attribute
            // is used; this is equivalent to 0
            scenario.startTime = 0;

            // when endMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the endMark; with a start of 0 the duration equals that value
            scenario.duration = endMarkValue;
          }
          else
          {
            test_true(false, 'Test measure scenario unhandled');
          }
        }

        // test that expected measures are returned by getEntriesByName
        var entries;
        for (var i in TEST_MEASURES)
        {
          entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
          // for all test measures, the test will validate the test measure against the first entry
          // returned by getEntriesByName(), except for the last measure, where since it is a
          // duplicate measure, the test will validate it against the second entry returned by
          // getEntriesByName()
          test_measure(entries[(i == duplicate_index ? 1 : 0)],
                       "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[" +
                       (i == duplicate_index ? 1 : 0) + "]",
                       TEST_MEASURES[i].name,
                       TEST_MEASURES[i].startTime,
                       TEST_MEASURES[i].duration);
          TEST_MEASURES[i].entryMatch = entries[(i == duplicate_index ? 1 : 0)];
        }

        // test that expected measures are returned by getEntriesByName with the entryType parameter
        // provided
        for (var i in TEST_MEASURES)
        {
          entries = window.performance.getEntriesByName(TEST_MEASURES[i].name, "measure");

          test_true(match_entries(entries[(i == duplicate_index ? 1 : 0)], TEST_MEASURES[i].entryMatch),
                    "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\", \"measure\")[" +
                    (i == duplicate_index ? 1 : 0) + "] returns an object containing the \"" +
                    TEST_MEASURES[i].name + "\" measure in the correct order, and its value matches the \"" +
                    TEST_MEASURES[i].name + "\" measure returned by window.performance.getEntriesByName(\"" +
                    TEST_MEASURES[i].name + "\")");
        }

        // test that expected measures are returned by getEntries
        entries = get_test_entries(window.performance.getEntries(), "measure");

        test_measure_list(entries, "window.performance.getEntries()", TEST_MEASURES);

        // test that expected measures are returned by getEntriesByType
        entries = window.performance.getEntriesByType("measure");

        test_measure_list(entries, "window.performance.getEntriesByType(\"measure\")", TEST_MEASURES);

        done();
      }

      // Compares two performance entries field by field; name and entryType must
      // match exactly, startTime and duration must agree within `threshold` ms.
      // `threshold` defaults to the test-wide testThreshold (the parameter was
      // previously assigned but never used — comparisons silently read the
      // global instead).
      function match_entries(entry1, entry2, threshold)
      {
        if (threshold == undefined)
        {
          threshold = testThreshold;
        }

        var pass = true;

        // match name
        pass = pass && (entry1.name == entry2.name);

        // match startTime
        pass = pass && (Math.abs(entry1.startTime - entry2.startTime) <= threshold);

        // match entryType
        pass = pass && (entry1.entryType == entry2.entryType);

        // match duration
        pass = pass && (Math.abs(entry1.duration - entry2.duration) <= threshold);

        return pass;
      }

      // Asserts that a single measure entry has the expected name, startTime,
      // entryType and (approximately) duration. measureEntryCommand is the
      // human-readable expression that produced the entry, used in messages.
      function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime,
                            expectedDuration)
      {
        // test name
        test_true(measureEntry.name == expectedName,
                  measureEntryCommand + ".name == \"" + expectedName + "\"");

        // test startTime; since for a mark, the startTime is always equal to a mark's value or the
        // value of a navigation timing attribute, the actual startTime should match the expected
        // value exactly
        test_true(Math.abs(measureEntry.startTime - expectedStartTime) == 0,
                  measureEntryCommand + ".startTime is correct");

        // test entryType
        test_true(measureEntry.entryType == "measure",
                  measureEntryCommand + ".entryType == \"measure\"");

        // test duration, allow for an acceptable threshold in the difference between the actual
        // duration and the expected value for the duration
        test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold,
                  measureEntryCommand +
                  ".duration is approximately correct (up to " + testThreshold + "ms difference allowed)");
      }

      // Verifies that measureEntryList contains every scenario in
      // measureScenarios (each entry matched at most once), that each matched
      // entry equals the cached entryMatch, and that the list is sorted by
      // startTime.
      function test_measure_list(measureEntryList, measureEntryListCommand, measureScenarios)
      {
        // give all entries a "found" property that can be set to ensure it isn't tested twice
        for (var i in measureEntryList)
        {
          measureEntryList[i].found = false;
        }

        for (var i in measureScenarios)
        {
          measureScenarios[i].found = false;

          for (var j in measureEntryList)
          {
            if (match_entries(measureEntryList[j], measureScenarios[i]) && !measureEntryList[j].found)
            {
              test_true(match_entries(measureEntryList[j], measureScenarios[i].entryMatch),
                        measureEntryListCommand + " returns an object containing the \"" +
                        measureScenarios[i].name + "\" measure, and its value matches the measure " +
                        "returned by window.performance.getEntriesByName(\"" + measureScenarios[i].name +
                        "\")[" + (i == duplicate_index ? 1 : 0) + "].");

              measureEntryList[j].found = true;
              measureScenarios[i].found = true;
              break;
            }
          }

          if (!measureScenarios[i].found)
          {
            test_true(false,
                      measureEntryListCommand + " returns an object containing the \"" +
                      measureScenarios[i].name + "\" measure.");
          }
        }

        // verify order of output of getEntriesByType
        var startTimeCurr = 0;
        var pass = true;
        for (var i in measureEntryList)
        {
          if (measureEntryList[i].startTime < startTimeCurr)
          {
            pass = false;
          }
          startTimeCurr = measureEntryList[i].startTime;
        }
        test_true(pass,
                  measureEntryListCommand + " returns an object containing all test " +
                  "measures in order.");
      }

      // Returns the subset of entryList whose entryType equals the given type.
      function get_test_entries(entryList, entryType)
      {
        var testEntries = new Array();

        // filter entryList
        for (var i in entryList)
        {
          if (entryList[i].entryType == entryType)
          {
            testEntries.push(entryList[i]);
          }
        }

        return testEntries;
      }
    </script>
  </head>
  <body onload="onload_test();">
    <h1>Description</h1>
    <p>This test validates that the performance.measure() method is working properly. This test creates the
       following measures to test this method:</p>
    <ul>
      <li>"measure_no_start_no_end": created using a measure() call without a startMark or endMark
          provided</li>
      <li>"measure_start_no_end": created using a measure() call with only the startMark provided</li>
      <li>"measure_start_end": created using a measure() call with both a startMark and endMark provided</li>
      <li>"measure_no_start_end": created using a measure() call with only the endMark provided</li>
      <li>"measure_no_start_no_end": duplicate of the first measure, used to confirm names can be re-used</li>
    </ul>
    <p>After creating each measure, the existence of these measures is validated by calling
       performance.getEntriesByName() (both with and without the entryType parameter provided),
       performance.getEntriesByType(), and performance.getEntries()</p>

    <div id="log"></div>
  </body>
</html>