• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 2010 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
#include "chrome/browser/net/passive_log_collector.h"

#include <algorithm>

#include "base/compiler_specific.h"
#include "base/format_macros.h"
#include "base/string_util.h"
#include "net/url_request/url_request_netlog_params.h"
#include "testing/gtest/include/gtest/gtest.h"
12 
namespace {

// Short aliases for the PassiveLogCollector nested types exercised below.
typedef PassiveLogCollector::RequestTracker RequestTracker;
typedef PassiveLogCollector::SourceInfoList SourceInfoList;
typedef PassiveLogCollector::SocketTracker SocketTracker;
typedef PassiveLogCollector::HttpStreamJobTracker HttpStreamJobTracker;
using net::NetLog;

// Source type stamped onto the synthetic log entries built by the helper
// functions below.
const NetLog::SourceType kSourceType = NetLog::SOURCE_NONE;
MakeStartLogEntryWithURL(int source_id,const std::string & url)23 ChromeNetLog::Entry MakeStartLogEntryWithURL(int source_id,
24                                              const std::string& url) {
25   return ChromeNetLog::Entry(
26       0,
27       NetLog::TYPE_URL_REQUEST_START_JOB,
28       base::TimeTicks(),
29       NetLog::Source(kSourceType, source_id),
30       NetLog::PHASE_BEGIN,
31       new net::URLRequestStartEventParameters(GURL(url), "GET", 0, net::LOW));
32 }
33 
MakeStartLogEntry(int source_id)34 ChromeNetLog::Entry MakeStartLogEntry(int source_id) {
35   return MakeStartLogEntryWithURL(source_id,
36                                   StringPrintf("http://req%d", source_id));
37 }
38 
MakeEndLogEntry(int source_id)39 ChromeNetLog::Entry MakeEndLogEntry(int source_id) {
40   return ChromeNetLog::Entry(
41       0,
42       NetLog::TYPE_REQUEST_ALIVE,
43       base::TimeTicks(),
44       NetLog::Source(kSourceType, source_id),
45       NetLog::PHASE_END,
46       NULL);
47 }
48 
OrderBySourceID(const PassiveLogCollector::SourceInfo & a,const PassiveLogCollector::SourceInfo & b)49 bool OrderBySourceID(const PassiveLogCollector::SourceInfo& a,
50                      const PassiveLogCollector::SourceInfo& b) {
51   return a.source_id < b.source_id;
52 }
53 
GetLiveSources(const PassiveLogCollector::SourceTracker & tracker)54 SourceInfoList GetLiveSources(
55     const PassiveLogCollector::SourceTracker& tracker) {
56   SourceInfoList result = tracker.GetAllDeadOrAliveSources(true);
57   std::sort(result.begin(), result.end(), &OrderBySourceID);
58   return result;
59 }
60 
GetDeadSources(const PassiveLogCollector::SourceTracker & tracker)61 SourceInfoList GetDeadSources(
62     const PassiveLogCollector::SourceTracker& tracker) {
63   SourceInfoList result = tracker.GetAllDeadOrAliveSources(false);
64   std::sort(result.begin(), result.end(), &OrderBySourceID);
65   return result;
66 }
67 
// NOTE(review): this constant appears unreferenced anywhere in this file --
// possibly left over from an earlier revision of these tests. Confirm and
// remove if truly unused.
static const int kMaxNumLoadLogEntries = 1;

}  // namespace
71 
72 // Test that once the tracker contains a total maximum amount of data
73 // (graveyard + live requests), it resets itself to avoid growing unbounded.
TEST(RequestTrackerTest,DropsAfterMaximumSize)74 TEST(RequestTrackerTest, DropsAfterMaximumSize) {
75   RequestTracker tracker(NULL);
76 
77   // Fill the source tracker with as many sources as it can hold.
78   for (size_t i = 0; i < RequestTracker::kMaxNumSources; ++i)
79     tracker.OnAddEntry(MakeStartLogEntry(i));
80 
81   EXPECT_EQ(RequestTracker::kMaxNumSources, GetLiveSources(tracker).size());
82 
83   // Add 1 more -- this should cause it to exceed its expected peak, and
84   // therefore reset all of its data.
85   tracker.OnAddEntry(
86       MakeStartLogEntry(1 + RequestTracker::kMaxNumSources));
87 
88   EXPECT_EQ(1u, GetLiveSources(tracker).size());
89 }
90 
TEST(RequestTrackerTest, BasicBounded) {
  RequestTracker tracker(NULL);
  EXPECT_EQ(0u, GetLiveSources(tracker).size());
  EXPECT_EQ(0u, GetDeadSources(tracker).size());

  // Start five requests (source IDs 1 through 5).
  for (int id = 1; id <= 5; ++id)
    tracker.OnAddEntry(MakeStartLogEntry(id));

  SourceInfoList live = GetLiveSources(tracker);

  ASSERT_EQ(5u, live.size());
  EXPECT_EQ("http://req1/", live[0].GetURL());
  EXPECT_EQ("http://req2/", live[1].GetURL());
  EXPECT_EQ("http://req3/", live[2].GetURL());
  EXPECT_EQ("http://req4/", live[3].GetURL());
  EXPECT_EQ("http://req5/", live[4].GetURL());

  // End requests 1, 5 and 3 (out of order); they move to the graveyard.
  tracker.OnAddEntry(MakeEndLogEntry(1));
  tracker.OnAddEntry(MakeEndLogEntry(5));
  tracker.OnAddEntry(MakeEndLogEntry(3));

  ASSERT_EQ(3u, GetDeadSources(tracker).size());

  // Only requests 2 and 4 should still be live.
  live = GetLiveSources(tracker);
  ASSERT_EQ(2u, live.size());
  EXPECT_EQ("http://req2/", live[0].GetURL());
  EXPECT_EQ("http://req4/", live[1].GetURL());
}
123 
TEST(RequestTrackerTest, GraveyardBounded) {
  RequestTracker tracker(NULL);
  EXPECT_EQ(0u, GetLiveSources(tracker).size());
  EXPECT_EQ(0u, GetDeadSources(tracker).size());

  // Start and immediately finish twice as many requests as the graveyard
  // can hold.
  for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize * 2; ++i) {
    tracker.OnAddEntry(MakeStartLogEntry(i));
    tracker.OnAddEntry(MakeEndLogEntry(i));
  }

  EXPECT_EQ(0u, GetLiveSources(tracker).size());

  // Only the most recent |kMaxGraveyardSize| requests should remain.
  SourceInfoList graveyard = GetDeadSources(tracker);
  ASSERT_EQ(RequestTracker::kMaxGraveyardSize, graveyard.size());

  for (size_t i = 0; i < graveyard.size(); ++i) {
    size_t expected_id = i + RequestTracker::kMaxGraveyardSize;
    EXPECT_EQ(StringPrintf("http://req%" PRIuS "/", expected_id),
              graveyard[i].GetURL());
  }
}
149 
// Check that we exclude "chrome://" URLs from being saved into the recent
// requests list (graveyard).
TEST(RequestTrackerTest, GraveyardIsFiltered) {
  RequestTracker tracker(NULL);

  // This will be excluded from the graveyard ("chrome://" scheme).
  std::string url1 = "chrome://dontcare/";
  tracker.OnAddEntry(MakeStartLogEntryWithURL(1, url1));
  tracker.OnAddEntry(MakeEndLogEntry(1));

  // This will be added to the graveyard -- "chrome2" is a different scheme.
  std::string url2 = "chrome2://dontcare/";
  tracker.OnAddEntry(MakeStartLogEntryWithURL(2, url2));
  tracker.OnAddEntry(MakeEndLogEntry(2));

  // This will be added to the graveyard.
  std::string url3 = "http://foo/";
  tracker.OnAddEntry(MakeStartLogEntryWithURL(3, url3));
  tracker.OnAddEntry(MakeEndLogEntry(3));

  // Only the two non-chrome:// URLs should have been recorded.
  ASSERT_EQ(2u, GetDeadSources(tracker).size());
  EXPECT_EQ(url2, GetDeadSources(tracker)[0].GetURL());
  EXPECT_EQ(url3, GetDeadSources(tracker)[1].GetURL());
}
174 
TEST(SpdySessionTracker, MovesToGraveyard) {
  PassiveLogCollector::SpdySessionTracker tracker;
  EXPECT_EQ(0u, GetLiveSources(tracker).size());
  EXPECT_EQ(0u, GetDeadSources(tracker).size());

  NetLog::Source spdy_source(NetLog::SOURCE_SPDY_SESSION, 1);

  // A PHASE_BEGIN entry makes the session show up as live.
  ChromeNetLog::Entry begin_entry(0u,
                                  NetLog::TYPE_SPDY_SESSION,
                                  base::TimeTicks(),
                                  spdy_source,
                                  NetLog::PHASE_BEGIN,
                                  NULL);
  tracker.OnAddEntry(begin_entry);
  EXPECT_EQ(1u, GetLiveSources(tracker).size());
  EXPECT_EQ(0u, GetDeadSources(tracker).size());

  // The matching PHASE_END entry moves it to the graveyard.
  ChromeNetLog::Entry end_entry(0u,
                                NetLog::TYPE_SPDY_SESSION,
                                base::TimeTicks(),
                                spdy_source,
                                NetLog::PHASE_END,
                                NULL);
  tracker.OnAddEntry(end_entry);
  EXPECT_EQ(0u, GetLiveSources(tracker).size());
  EXPECT_EQ(1u, GetDeadSources(tracker).size());
}
204 
// Test that when a SOURCE_HTTP_STREAM_JOB is connected to a SOURCE_URL_REQUEST
// (via the TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB event), it holds a reference
// to the SOURCE_HTTP_STREAM_JOB preventing it from getting deleted as long as
// the SOURCE_URL_REQUEST is still around.
TEST(PassiveLogCollectorTest, HoldReferenceToDependentSource) {
  PassiveLogCollector log;

  EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());

  uint32 next_id = 0;
  NetLog::Source stream_job_source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++);
  NetLog::Source url_request_source(NetLog::SOURCE_URL_REQUEST, next_id++);

  // Start a SOURCE_HTTP_STREAM_JOB.
  log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                 base::TimeTicks(),
                 stream_job_source,
                 NetLog::PHASE_BEGIN,
                 NULL);

  EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
  EXPECT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());

  // Start a SOURCE_URL_REQUEST.
  log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
                 base::TimeTicks(),
                 url_request_source,
                 NetLog::PHASE_BEGIN,
                 NULL);

  // Check that there is no association between the SOURCE_URL_REQUEST and the
  // SOURCE_HTTP_STREAM_JOB yet.
  ASSERT_EQ(1u, GetLiveSources(log.url_request_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.url_request_tracker_)[0];
    EXPECT_EQ(0, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }
  ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.http_stream_job_tracker_)[0];
    EXPECT_EQ(0, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }

  // Associate the SOURCE_HTTP_STREAM_JOB with the SOURCE_URL_REQUEST.
  log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB,
                 base::TimeTicks(),
                 url_request_source,
                 NetLog::PHASE_NONE,
                 new net::NetLogSourceParameter("x", stream_job_source));

  // Check that an association was made -- the SOURCE_URL_REQUEST should have
  // added a reference to the SOURCE_HTTP_STREAM_JOB.
  ASSERT_EQ(1u, GetLiveSources(log.url_request_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.url_request_tracker_)[0];
    EXPECT_EQ(0, info.reference_count);
    EXPECT_EQ(1u, info.dependencies.size());
    EXPECT_EQ(stream_job_source.id, info.dependencies[0].id);
  }
  ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.http_stream_job_tracker_)[0];
    EXPECT_EQ(1, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }

  // Now end both |stream_job_source| and |url_request_source|. This sends them
  // to deletion queue, and they will be deleted once space runs out.

  log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
                 base::TimeTicks(),
                 url_request_source,
                 NetLog::PHASE_END,
                 NULL);

  log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                 base::TimeTicks(),
                 stream_job_source,
                 NetLog::PHASE_END,
                 NULL);

  // Verify that both sources are in fact dead, and that |url_request_source|
  // still holds a reference to |stream_job_source|.
  ASSERT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
  ASSERT_EQ(1u, GetDeadSources(log.url_request_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetDeadSources(log.url_request_tracker_)[0];
    EXPECT_EQ(0, info.reference_count);
    EXPECT_EQ(1u, info.dependencies.size());
    EXPECT_EQ(stream_job_source.id, info.dependencies[0].id);
  }
  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
  ASSERT_EQ(1u, GetDeadSources(log.http_stream_job_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetDeadSources(log.http_stream_job_tracker_)[0];
    EXPECT_EQ(1, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }

  // Cycle through a bunch of SOURCE_HTTP_STREAM_JOB -- if it were not
  // referenced, this loop would have evicted |stream_job_source| from the
  // graveyard.
  for (size_t i = 0; i < HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
      log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                     base::TimeTicks(),
                     NetLog::Source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++),
                     NetLog::PHASE_END,
                     NULL);
  }

  // |stream_job_source| survives in addition to the kMaxGraveyardSize new
  // entries, because the dead |url_request_source| still references it.
  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
  ASSERT_EQ(HttpStreamJobTracker::kMaxGraveyardSize + 1,
            GetDeadSources(log.http_stream_job_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetDeadSources(log.http_stream_job_tracker_)[0];
    EXPECT_EQ(stream_job_source.id, info.source_id);
    EXPECT_EQ(1, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }

  // Cycle through a bunch of SOURCE_URL_REQUEST -- this will cause
  // |url_request_source| to be freed, which in turn should release the final
  // reference to |stream_job_source| causing it to be freed as well.
  for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize; ++i) {
    log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
                   base::TimeTicks(),
                   NetLog::Source(NetLog::SOURCE_URL_REQUEST, next_id++),
                   NetLog::PHASE_END,
                   NULL);
  }

  EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
  EXPECT_EQ(RequestTracker::kMaxGraveyardSize,
            GetDeadSources(log.url_request_tracker_).size());

  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
  EXPECT_EQ(HttpStreamJobTracker::kMaxGraveyardSize,
            GetDeadSources(log.http_stream_job_tracker_).size());
}
353 
// Have a HTTP_STREAM_JOB hold a reference to a SOCKET. Then cause the SOCKET
// to get evicted (by exceeding the maximum sources limit). Now the
// HTTP_STREAM_JOB is referencing a non-existent SOCKET. Lastly, evict the
// HTTP_STREAM_JOB so it tries to drop all of its references. Make sure that
// in releasing its non-existent reference it doesn't trip any DCHECKs.
TEST(PassiveLogCollectorTest, HoldReferenceToDeletedSource) {
  PassiveLogCollector log;

  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
  EXPECT_EQ(0u, GetLiveSources(log.socket_tracker_).size());

  uint32 next_id = 0;
  NetLog::Source socket_source(NetLog::SOURCE_SOCKET, next_id++);
  NetLog::Source stream_job_source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++);

  // Start a SOURCE_SOCKET.
  log.OnAddEntry(NetLog::TYPE_SOCKET_ALIVE,
                 base::TimeTicks(),
                 socket_source,
                 NetLog::PHASE_BEGIN,
                 NULL);

  EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
  EXPECT_EQ(1u, GetLiveSources(log.socket_tracker_).size());

  // Start a SOURCE_HTTP_STREAM_JOB.
  log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                 base::TimeTicks(),
                 stream_job_source,
                 NetLog::PHASE_BEGIN,
                 NULL);

  // Associate the SOURCE_SOCKET with the SOURCE_HTTP_STREAM_JOB.
  log.OnAddEntry(NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
                 base::TimeTicks(),
                 stream_job_source,
                 NetLog::PHASE_NONE,
                 new net::NetLogSourceParameter("x", socket_source));

  // Check that an association was made -- the SOURCE_HTTP_STREAM_JOB should
  // have added a reference to the SOURCE_SOCKET.
  ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.http_stream_job_tracker_)[0];
    EXPECT_EQ(0, info.reference_count);
    EXPECT_EQ(1u, info.dependencies.size());
    EXPECT_EQ(socket_source.id, info.dependencies[0].id);
  }
  ASSERT_EQ(1u, GetLiveSources(log.socket_tracker_).size());
  {
    PassiveLogCollector::SourceInfo info =
        GetLiveSources(log.socket_tracker_)[0];
    EXPECT_EQ(1, info.reference_count);
    EXPECT_EQ(0u, info.dependencies.size());
  }

  // Add lots of sources to the socket tracker. This is just enough to cause
  // the tracker to reach its peak, and reset all of its data as a safeguard.
  for (size_t i = 0; i < SocketTracker::kMaxNumSources; ++i) {
    log.OnAddEntry(NetLog::TYPE_SOCKET_ALIVE,
                   base::TimeTicks(),
                   NetLog::Source(NetLog::SOURCE_SOCKET, next_id++),
                   NetLog::PHASE_BEGIN,
                   NULL);
  }
  // Only the still-referenced socket survives the reset.
  ASSERT_EQ(1u, GetLiveSources(log.socket_tracker_).size());

  // End the original request. Then saturate the graveyard with enough other
  // requests to cause it to be deleted. Once that source is deleted, it will
  // try to give up its reference to the SOCKET. However that socket_id no
  // longer exists -- should not DCHECK().
  log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                 base::TimeTicks(),
                 stream_job_source,
                 NetLog::PHASE_END,
                 NULL);
  for (size_t i = 0; i < HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
    log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
                   base::TimeTicks(),
                   NetLog::Source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++),
                   NetLog::PHASE_END,
                   NULL);
  }
  EXPECT_EQ(HttpStreamJobTracker::kMaxGraveyardSize,
            GetDeadSources(log.http_stream_job_tracker_).size());
}
441 
// Regression test for http://crbug.com/58847
TEST(PassiveLogCollectorTest, ReleaseDependencyToUnreferencedSource) {
  PassiveLogCollector log;

  // If these constants are weird, the test won't be testing the right thing.
  EXPECT_LT(PassiveLogCollector::HttpStreamJobTracker::kMaxGraveyardSize,
            PassiveLogCollector::HttpStreamJobTracker::kMaxNumSources);

  // Add a "reference" to a non-existent source (sourceID=1263 does not exist).
  scoped_refptr<net::NetLog::EventParameters> params =
      new net::NetLogSourceParameter(
          "source_dependency",
          net::NetLog::Source(net::NetLog::SOURCE_SOCKET, 1263));
  log.OnAddEntry(net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
                 base::TimeTicks(),
                 net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, 1706),
                 net::NetLog::PHASE_NONE,
                 params);

  // At this point source 1706 has noted 1263 as a dependency. However the
  // reference count for 1263 was not adjusted since it doesn't actually exist.

  // Move source 1706 to the graveyard.
  log.OnAddEntry(net::NetLog::TYPE_HTTP_STREAM_JOB,
                 base::TimeTicks(),
                 net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, 1706),
                 net::NetLog::PHASE_END,
                 NULL);

  // Now create a source entry for 1263, such that it is unreferenced and
  // waiting to be garbage collected.
  log.OnAddEntry(net::NetLog::TYPE_SOCKET_ALIVE,
                 base::TimeTicks(),
                 net::NetLog::Source(net::NetLog::SOURCE_SOCKET, 1263),
                 net::NetLog::PHASE_END, NULL);

  // Add kMaxGraveyardSize unreferenced HTTP_STREAM_JOBs to the circular
  // buffer containing source 1706. After adding kMaxGraveyardSize - 1 of them
  // the buffer will be full; adding one more then evicts the oldest item,
  // which is 1706. In doing so, 1706 will try to release the reference it
  // *thinks* it has on 1263. However 1263 has a reference count of 0 and is
  // already in a graveyard.
  for (size_t i = 0;
       i < PassiveLogCollector::HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
    log.OnAddEntry(net::NetLog::TYPE_HTTP_STREAM_JOB,
                   base::TimeTicks(),
                   net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, i),
                   net::NetLog::PHASE_END,
                   NULL);
  }

  // To pass, this test should simply not have DCHECK-ed above.
}
495