• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2023 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Runtime -> tracer API.
6
7package runtime
8
9import (
10	"internal/runtime/atomic"
11	_ "unsafe" // for go:linkname
12)
13
// gTraceState is per-G state for the tracer.
type gTraceState struct {
	// Embedded per-generation scheduling-resource state
	// (sequence counters and status-traced tracking).
	traceSchedResourceState
}
18
19// reset resets the gTraceState for a new goroutine.
20func (s *gTraceState) reset() {
21	s.seq = [2]uint64{}
22	// N.B. s.statusTraced is managed and cleared separately.
23}
24
// mTraceState is per-M state for the tracer.
type mTraceState struct {
	// seqlock indicates that this M is writing to a trace buffer.
	// An odd value means the M is inside a tracer critical section
	// (see traceAcquireEnabled/traceRelease).
	seqlock atomic.Uintptr
	// buf is the per-M traceBuf for writing. Indexed by trace.gen%2.
	buf [2]*traceBuf
	// link is a snapshot of alllink or freelink.
	link *m
}
31
// pTraceState is per-P state for the tracer.
type pTraceState struct {
	// Embedded per-generation scheduling-resource state
	// (sequence counters and status-traced tracking).
	traceSchedResourceState

	// mSyscallID is the ID of the M this was bound to before entering a syscall.
	// Set in GoSysCall; reset to -1 when the binding is dissolved
	// (GoSysExit, ProcSteal).
	mSyscallID int64

	// maySweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	maySweep bool

	// inSweep indicates that at least one sweep event has been traced.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while maySweep was true).
	swept, reclaimed uintptr
}
51
// traceLockInit initializes global trace locks.
//
// Both generations' (index 0 and 1) tables are initialized, since the
// tracer double-buffers its state across generations.
func traceLockInit() {
	// Sharing a lock rank here is fine because they should never be accessed
	// together. If they are, we want to find out immediately.
	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[0].tab.mem.lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].tab.mem.lock, lockRankTraceStrings)
	lockInit(&trace.stackTab[0].tab.mem.lock, lockRankTraceStackTab)
	lockInit(&trace.stackTab[1].tab.mem.lock, lockRankTraceStackTab)
	lockInit(&trace.typeTab[0].tab.mem.lock, lockRankTraceTypeTab)
	lockInit(&trace.typeTab[1].tab.mem.lock, lockRankTraceTypeTab)
	lockInit(&trace.lock, lockRankTrace)
}
66
// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
//
// It does not take the lock; it only registers with the lock-rank
// checker that trace.lock may be acquired from here.
//
// nosplit because traceAcquire is nosplit.
//
//go:nosplit
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}
76
// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

const (
	traceBlockGeneric traceBlockReason = iota
	traceBlockForever
	traceBlockNet
	traceBlockSelect
	traceBlockCondWait
	traceBlockSync
	traceBlockChanSend
	traceBlockChanRecv
	traceBlockGCMarkAssist
	traceBlockGCSweep
	traceBlockSystemGoroutine
	traceBlockPreempted
	traceBlockDebugCall
	traceBlockUntilGCEnds
	traceBlockSleep
	traceBlockGCWeakToStrongWait
)

// traceBlockReasonStrings maps each traceBlockReason to the
// human-readable string that appears in the trace.
var traceBlockReasonStrings = [...]string{
	traceBlockGeneric:            "unspecified",
	traceBlockForever:            "forever",
	traceBlockNet:                "network",
	traceBlockSelect:             "select",
	traceBlockCondWait:           "sync.(*Cond).Wait",
	traceBlockSync:               "sync",
	traceBlockChanSend:           "chan send",
	traceBlockChanRecv:           "chan receive",
	traceBlockGCMarkAssist:       "GC mark assist wait for work",
	traceBlockGCSweep:            "GC background sweeper wait",
	traceBlockSystemGoroutine:    "system goroutine wait",
	traceBlockPreempted:          "preempted",
	traceBlockDebugCall:          "wait for debug call",
	traceBlockUntilGCEnds:        "wait until GC ends",
	traceBlockSleep:              "sleep",
	traceBlockGCWeakToStrongWait: "GC weak to strong wait",
}
123
// traceGoStopReason is an enumeration of reasons a goroutine might yield.
//
// Note that traceGoStopReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceGoStopReason uint8

const (
	traceGoStopGeneric traceGoStopReason = iota
	traceGoStopGoSched
	traceGoStopPreempted
)

// traceGoStopReasonStrings maps each traceGoStopReason to the
// human-readable string that appears in the trace.
var traceGoStopReasonStrings = [...]string{
	traceGoStopGeneric:   "unspecified",
	traceGoStopGoSched:   "runtime.Gosched",
	traceGoStopPreempted: "preempted",
}
141
// traceEnabled returns true if the trace is currently enabled.
//
// nosplit to keep it cheap and callable from contexts where stack
// growth is forbidden; it is also kept trivial so callers like
// traceAcquire stay inlineable.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}
148
// traceAllocFreeEnabled returns true if the trace is currently enabled
// and alloc/free events are also enabled.
//
//go:nosplit
func traceAllocFreeEnabled() bool {
	return trace.enabledWithAllocFree
}
156
// traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool {
	return trace.shutdown.Load()
}
161
// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events being
// written as happening atomically.
type traceLocker struct {
	// mp is the M holding the tracer critical section (preemption disabled).
	mp *m
	// gen is the trace generation events are written against.
	// gen == 0 marks the traceLocker invalid (see ok).
	gen uintptr
}
169
// debugTraceReentrancy checks if the trace is reentrant.
//
// This is optional because throwing in a function makes it instantly
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false
176
177// traceAcquire prepares this M for writing one or more trace events.
178//
179// nosplit because it's called on the syscall path when stack movement is forbidden.
180//
181//go:nosplit
182func traceAcquire() traceLocker {
183	if !traceEnabled() {
184		return traceLocker{}
185	}
186	return traceAcquireEnabled()
187}
188
// traceTryAcquire is like traceAcquire, but may return an invalid traceLocker even
// if tracing is enabled. For example, it will return !ok if traceAcquire is being
// called with an active traceAcquire on the M (reentrant locking). This exists for
// optimistically emitting events in the few contexts where tracing is not allowed.
//
// nosplit for alignment with traceAcquire, so it can be used in the
// same contexts.
//
//go:nosplit
func traceTryAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{}
	}
	return traceTryAcquireEnabled()
}
204
// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out to make traceAcquire inlineable to keep the overhead of the tracer
// when it's disabled low.
//
// On success, it returns a valid traceLocker holding the current M (with
// preemption disabled) and the trace generation to write against. If the
// trace was disabled concurrently, it undoes its work and returns an
// invalid traceLocker.
//
// nosplit because it's called by traceAcquire, which is nosplit.
//
//go:nosplit
func traceAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Prevent preemption.
	mp := acquirem()

	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
	// until all Ms are observed to be outside of their seqlock critical section.
	//
	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
	// usage of the seqlock here, make sure to also look at what traceCPUSample is
	// doing.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		throw("bad use of trace.seqlock or tracer is reentrant")
	}

	// N.B. This load of gen appears redundant with the one in traceEnabled.
	// However, it's very important that the gen we use for writing to the trace
	// is acquired under a traceLocker so traceAdvance can make sure no stale
	// gen values are being used.
	//
	// Because we're doing this load again, it also means that the trace
	// might end up being disabled when we load it. In that case we need to undo
	// what we did and bail.
	gen := trace.gen.Load()
	if gen == 0 {
		// Release the seqlock (making it even again) and re-enable preemption.
		mp.trace.seqlock.Add(1)
		releasem(mp)
		return traceLocker{}
	}
	return traceLocker{mp, gen}
}
248
// traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
// traceLocker under some conditions. See traceTryAcquire for more details.
//
// nosplit for alignment with traceAcquireEnabled, so it can be used in the
// same contexts.
//
//go:nosplit
func traceTryAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Check if we're already locked. An odd seqlock value means this M is
	// already inside a tracer critical section. If so, return an invalid
	// traceLocker rather than deadlocking or reentering.
	if getg().m.trace.seqlock.Load()%2 == 1 {
		return traceLocker{}
	}
	return traceAcquireEnabled()
}
268
// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// A zero gen is never a valid trace generation, so it doubles as the
// "invalid" sentinel.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.gen != 0
}
277
// traceRelease indicates that this M is done writing trace events.
//
// It exits the seqlock critical section (the counter becomes even
// again) and re-enables preemption. Must pair with a successful
// traceAcquire.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
	seq := tl.mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 0 {
		print("runtime: seq=", seq, "\n")
		throw("bad use of trace.seqlock")
	}
	releasem(tl.mp)
}
291
// traceExitingSyscall marks a goroutine as exiting the syscall slow path.
//
// Must be paired with a traceExitedSyscall call.
func traceExitingSyscall() {
	trace.exitingSyscall.Add(1)
}
298
// traceExitedSyscall marks a goroutine as having exited the syscall slow path.
func traceExitedSyscall() {
	trace.exitingSyscall.Add(-1)
}
303
// Gomaxprocs emits a ProcsChange event recording the new GOMAXPROCS
// value, along with the caller's stack.
func (tl traceLocker) Gomaxprocs(procs int32) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
}
308
// ProcStart traces a ProcStart event.
//
// Must be called with a valid P.
func (tl traceLocker) ProcStart() {
	pp := tl.mp.p.ptr()
	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
	// is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
}
319
// ProcStop traces a ProcStop event.
//
// NOTE(review): pp is not read in the body; it appears to document which
// P is being stopped, for symmetry with ProcStart — confirm with callers.
func (tl traceLocker) ProcStop(pp *p) {
	// The only time a goroutine is allowed to have its Proc moved around
	// from under it is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
}
326
// GCActive traces a GCActive event, tagging it with the current GC
// sequence number.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCActive() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
337
// GCStart traces a GCBegin event, tagging it with the current GC
// sequence number and the caller's stack.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
348
// GCDone traces a GCEnd event, tagging it with the current GC
// sequence number.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
359
// STWStart traces a STWBegin event with the given reason string and
// the caller's stack.
func (tl traceLocker) STWStart(reason stwReason) {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
}
366
// STWDone traces a STWEnd event.
func (tl traceLocker) STWDone() {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
}
373
// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// GCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepStart() {
	// Delay the actual GCSweepBegin event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		// Unbalanced start: a previous GCSweepStart was never closed.
		throw("double traceGCSweepStart")
	}
	// Arm sweep tracing and reset the byte counters for this loop.
	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}
390
// GCSweepSpan traces the sweep of a single span. If this is
// the first span swept since traceGCSweepStart was called, this
// will emit a GCSweepBegin event.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		if pp.trace.swept == 0 {
			// First span of this sweep loop: emit the deferred begin event.
			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
			pp.trace.inSweep = true
		}
		pp.trace.swept += bytesSwept
	}
}
409
// GCSweepDone finishes tracing a sweep loop. If any memory was
// swept (i.e. traceGCSweepSpan emitted an event) then this will emit
// a GCSweepEnd event.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepDone() {
	pp := tl.mp.p.ptr()
	if !pp.trace.maySweep {
		// Unbalanced end: no matching GCSweepStart.
		throw("missing traceGCSweepStart")
	}
	if pp.trace.inSweep {
		// A begin event was emitted, so close it with the byte totals.
		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
		pp.trace.inSweep = false
	}
	pp.trace.maySweep = false
}
426
// GCMarkAssistStart emits a MarkAssistBegin event with the caller's stack.
func (tl traceLocker) GCMarkAssistStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
}
431
// GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
}
436
// GoCreate emits a GoCreate event (or GoCreateBlocked if the new
// goroutine starts out blocked).
//
// The new goroutine's status is marked as traced for this generation so
// no separate status event is emitted for it.
func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
	newg.trace.setStatusTraced(tl.gen)
	ev := traceEvGoCreate
	if blocked {
		ev = traceEvGoCreateBlocked
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
}
446
// GoStart emits a GoStart event for the current M's goroutine. If the
// P is running as a GC mark worker, a GoLabel event naming the worker
// mode is attached.
//
// Must be called with a valid P.
func (tl traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		// Label the goroutine with its mark worker mode for this generation.
		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
	}
	w.end()
}
460
// GoEnd emits a GoDestroy event.
//
// TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
}
467
// GoSched emits a GoStop event with a GoSched reason.
func (tl traceLocker) GoSched() {
	tl.GoStop(traceGoStopGoSched)
}
472
// GoPreempt emits a GoStop event with a GoPreempted reason.
func (tl traceLocker) GoPreempt() {
	tl.GoStop(traceGoStopPreempted)
}
477
// GoStop emits a GoStop event with the provided reason, translated to
// this generation's reason table, plus the caller's stack.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
}
482
// GoPark emits a GoBlock event with the provided reason, translated to
// this generation's reason table. skip selects how many frames to omit
// from the recorded stack.
//
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
}
490
// GoUnpark emits a GoUnblock event for gp, preceded by a GoWaiting
// status event for gp if one hasn't been traced this generation.
func (tl traceLocker) GoUnpark(gp *g, skip int) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	w := tl.eventWriter(traceGoRunning, traceProcRunning)
	// Careful: don't use the event writer. We never want status or in-progress events
	// to trigger more in-progress events.
	w.w = emitUnblockStatus(w.w, gp, tl.gen)
	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
}
500
// GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
// is simultaneously being destroyed (GoSwitchDestroy is emitted instead).
func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	w := tl.eventWriter(traceGoRunning, traceProcRunning)
	// Careful: don't use the event writer. We never want status or in-progress events
	// to trigger more in-progress events.
	w.w = emitUnblockStatus(w.w, nextg, tl.gen)
	ev := traceEvGoSwitch
	if destroy {
		ev = traceEvGoSwitchDestroy
	}
	w.commit(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
}
515
// emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
// unblocked to the trace writer, but only if gp's status has not already
// been traced this generation (and this caller wins the race to trace it).
func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
	if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
		// TODO(go.dev/issue/65634): Although it would be nice to add a stack trace here of gp,
		// we cannot safely do so. gp is in _Gwaiting and so we don't have ownership of its stack.
		// We can fix this by acquiring the goroutine's scan bit.
		w = w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist, 0)
	}
	return w
}
527
// GoSysCall emits a GoSyscallBegin event.
//
// Must be called with a valid P.
func (tl traceLocker) GoSysCall() {
	// Scribble down the M that the P is currently attached to, so a later
	// ProcSteal can report which M the P was stolen from.
	pp := tl.mp.p.ptr()
	pp.trace.mSyscallID = int64(tl.mp.procid)
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
}
537
// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
// if lostP is true.
//
// lostP must be true in all cases that a goroutine loses its P during a syscall.
// This means it's not sufficient to check if it has no P. In particular, it needs to be
// true in the following cases:
// - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
// - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
// - The goroutine lost its P and acquired a different one, and is now running with that P.
func (tl traceLocker) GoSysExit(lostP bool) {
	ev := traceEvGoSyscallEnd
	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
	if lostP {
		ev = traceEvGoSyscallEndBlocked
		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
	} else {
		// Same P throughout the syscall: dissolve the M binding recorded
		// by GoSysCall.
		tl.mp.p.ptr().trace.mSyscallID = -1
	}
	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
}
558
// ProcSteal indicates that our current M stole a P from another M.
//
// inSyscall indicates that we're stealing the P from a syscall context.
//
// The caller must have ownership of pp.
func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
	// Grab the M ID we stole from, then clear the binding recorded by
	// GoSysCall on that M.
	mStolenFrom := pp.trace.mSyscallID
	pp.trace.mSyscallID = -1

	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
	// context of just emitting this event alone. There are two cases. Either we're trying to steal
	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
	// ourselves specifically to keep running. The two contexts look different, but can be summarized
	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
	// In the latter, we're a goroutine in a syscall.
	goStatus := traceGoRunning
	procStatus := traceProcRunning
	if inSyscall {
		goStatus = traceGoSyscall
		procStatus = traceProcSyscallAbandoned
	}
	w := tl.eventWriter(goStatus, procStatus)

	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
	// at all (e.g. entersyscall_gcwait).
	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
		// Careful: don't use the event writer. We never want status or in-progress events
		// to trigger more in-progress events.
		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
	}
	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
}
595
// HeapAlloc emits a HeapAlloc event recording the provided live heap size.
func (tl traceLocker) HeapAlloc(live uint64) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
}
600
601// HeapGoal reads the current heap goal and emits a HeapGoal event.
602func (tl traceLocker) HeapGoal() {
603	heapGoal := gcController.heapGoal()
604	if heapGoal == ^uint64(0) {
605		// Heap-based triggering is disabled.
606		heapGoal = 0
607	}
608	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
609}
610
// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
//
// This occurs when C code calls into Go. On pthread platforms it occurs only when
// a C thread calls into Go code for the first time.
func (tl traceLocker) GoCreateSyscall(gp *g) {
	// N.B. We should never trace a status for this goroutine (which we're currently running on),
	// since we want this to appear like goroutine creation.
	gp.trace.setStatusTraced(tl.gen)
	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
}
623
// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
//
// Must not have a P.
//
// This occurs when Go code returns back to C. On pthread platforms it occurs only when
// the C thread is destroyed.
func (tl traceLocker) GoDestroySyscall() {
	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
	// that is in the syscall state.
	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
}
635
636// To access runtime functions from runtime/trace.
637// See runtime/trace/annotation.go
638
// trace_userTaskCreate emits a UserTaskBegin event on behalf of
// runtime/trace, recording the task ID, its parent, and the task type
// string. No-op when tracing is disabled.
//
//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
	traceRelease(tl)
}
651
// trace_userTaskEnd emits a UserTaskEnd event on behalf of
// runtime/trace. No-op when tracing is disabled.
//
//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
	traceRelease(tl)
}
664
// trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
// depending on mode (0 == Begin, 1 == End). Any other mode is silently
// ignored. No-op when tracing is disabled.
//
// TODO(mknyszek): Just make this two functions.
//
//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	var ev traceEv
	switch mode {
	case 0:
		ev = traceEvUserRegionBegin
	case 1:
		ev = traceEvUserRegionEnd
	default:
		// Unknown mode: emit nothing, but release the traceLocker.
		traceRelease(tl)
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
	traceRelease(tl)
}
689
// trace_userLog emits a UserLog event carrying the task ID, category,
// and message. No-op when tracing is disabled.
//
//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
	traceRelease(tl)
}
702
// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
// mp must not be able to emit trace events anymore.
//
// sched.lock must be held to synchronize with traceAdvance.
func traceThreadDestroy(mp *m) {
	assertLockHeld(&sched.lock)

	// Flush all outstanding buffers to maintain the invariant
	// that an M only has active buffers while on sched.freem
	// or allm.
	//
	// Perform a traceAcquire/traceRelease on behalf of mp to
	// synchronize with the tracer trying to flush our buffer
	// as well.
	//
	// The seqlock is manipulated by hand (rather than via
	// traceAcquire/traceRelease) because we are operating on mp,
	// not necessarily the current M.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		throw("bad use of trace.seqlock or tracer is reentrant")
	}
	systemstack(func() {
		lock(&trace.lock)
		for i := range mp.trace.buf {
			if mp.trace.buf[i] != nil {
				// N.B. traceBufFlush accepts a generation, but it
				// really just cares about gen%2.
				traceBufFlush(mp.trace.buf[i], uintptr(i))
				mp.trace.buf[i] = nil
			}
		}
		unlock(&trace.lock)
	})
	// Exit the critical section and verify no one else touched the seqlock
	// in the meantime.
	seq1 := mp.trace.seqlock.Add(1)
	if seq1 != seq+1 {
		print("runtime: seq1=", seq1, "\n")
		throw("bad use of trace.seqlock")
	}
}
741