// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)
22
23// ranks describes the lock rank graph. See "go doc internal/dag" for
24// the syntax.
25//
26// "a < b" means a must be acquired before b if both are held
27// (or, if b is held, a cannot be acquired).
28//
29// "NONE < a" means no locks may be held when a is acquired.
30//
31// If a lock is not given a rank, then it is assumed to be a leaf
32// lock, which means no other lock can be acquired while it is held.
33// Therefore, leaf locks do not need to be given an explicit rank.
34//
35// Ranks in all caps are pseudo-nodes that help define order, but do
36// not actually define a rank.
37//
38// TODO: It's often hard to correlate rank names to locks. Change
39// these to be more consistent with the locks they label.
40const ranks = `
41# Sysmon
42NONE
43< sysmon
44< scavenge, forcegc;
45
46# Defer
47NONE < defer;
48
49# GC
50NONE <
51  sweepWaiters,
52  assistQueue,
53  strongFromWeakQueue,
54  sweep;
55
56# Test only
57NONE < testR, testW;
58
59NONE < timerSend;
60
61# Scheduler, timers, netpoll
62NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
63scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
64assistQueue,
65  cpuprof,
66  forcegc,
67  hchan,
68  pollDesc, # pollDesc can interact with timers, which can lock sched.
69  scavenge,
70  strongFromWeakQueue,
71  sweep,
72  sweepWaiters,
73  testR,
74  wakeableSleep
75# Above SCHED are things that can call into the scheduler.
76< SCHED
77# Below SCHED is the scheduler implementation.
78< allocmR,
79  execR;
80allocmR, execR, hchan < sched;
81sched < allg, allp;
82
83# Channels
84NONE < notifyList;
85hchan, notifyList < sudog;
86
87hchan, pollDesc, wakeableSleep < timers;
88timers, timerSend < timer < netpollInit;
89
90# Semaphores
91NONE < root;
92
93# Itabs
94NONE
95< itab
96< reflectOffs;
97
98# User arena state
99NONE < userArenaState;
100
101# Tracing without a P uses a global trace buffer.
102scavenge
103# Above TRACEGLOBAL can emit a trace event without a P.
104< TRACEGLOBAL
105# Below TRACEGLOBAL manages the global tracing buffer.
106# Note that traceBuf eventually chains to MALLOC, but we never get that far
107# in the situation where there's no P.
108< traceBuf;
109# Starting/stopping tracing traces strings.
110traceBuf < traceStrings;
111
112# Malloc
113allg,
114  allocmR,
115  allp, # procresize
116  execR, # May grow stack
117  execW, # May allocate after BeforeFork
118  hchan,
119  notifyList,
120  reflectOffs,
121  timer,
122  traceStrings,
123  userArenaState
124# Above MALLOC are things that can allocate memory.
125< MALLOC
126# Below MALLOC is the malloc implementation.
127< fin,
128  spanSetSpine,
129  mspanSpecial,
130  traceTypeTab,
131  MPROF;
132
133# We can acquire gcBitsArenas for pinner bits, and
134# it's guarded by mspanSpecial.
135MALLOC, mspanSpecial < gcBitsArenas;
136
137# Memory profiling
138MPROF < profInsert, profBlock, profMemActive;
139profMemActive < profMemFuture;
140
141# Stack allocation and copying
142gcBitsArenas,
143  netpollInit,
144  profBlock,
145  profInsert,
146  profMemFuture,
147  spanSetSpine,
148  fin,
149  root
150# Anything that can grow the stack can acquire STACKGROW.
151# (Most higher layers imply STACKGROW, like MALLOC.)
152< STACKGROW
153# Below STACKGROW is the stack allocator/copying implementation.
154< gscan;
155gscan < stackpool;
156gscan < stackLarge;
157# Generally, hchan must be acquired before gscan. But in one case,
158# where we suspend a G and then shrink its stack, syncadjustsudogs
159# can acquire hchan locks while holding gscan. To allow this case,
160# we use hchanLeaf instead of hchan.
161gscan < hchanLeaf;
162
163# Write barrier
164defer,
165  gscan,
166  mspanSpecial,
167  pollCache,
168  sudog,
169  timer
170# Anything that can have write barriers can acquire WB.
171# Above WB, we can have write barriers.
172< WB
173# Below WB is the write barrier implementation.
174< wbufSpans;
175
176# Span allocator
177stackLarge,
178  stackpool,
179  wbufSpans
180# Above mheap is anything that can call the span allocator.
181< mheap;
182# Below mheap is the span allocator implementation.
183#
184# Specials: we're allowed to allocate a special while holding
185# an mspanSpecial lock, and they're part of the malloc implementation.
186# Pinner bits might be freed by the span allocator.
187mheap, mspanSpecial < mheapSpecial;
188mheap, mheapSpecial < globalAlloc;
189
190# Execution tracer events (with a P)
191hchan,
192  mheap,
193  root,
194  sched,
195  traceStrings,
196  notifyList,
197  fin
198# Above TRACE is anything that can create a trace event
199< TRACE
200< trace
201< traceStackTab;
202
203# panic is handled specially. It is implicitly below all other locks.
204NONE < panic;
205# deadlock is not acquired while holding panic, but it also needs to be
206# below all other locks.
207panic < deadlock;
208# raceFini is only held while exiting.
209panic < raceFini;
210
211# RWMutex internal read lock
212
213allocmR,
214  allocmW
215< allocmRInternal;
216
217execR,
218  execW
219< execRInternal;
220
221testR,
222  testW
223< testRInternal;
224`
225
226// cyclicRanks lists lock ranks that allow multiple locks of the same
227// rank to be acquired simultaneously. The runtime enforces ordering
228// within these ranks using a separate mechanism.
229var cyclicRanks = map[string]bool{
230	// Multiple timers are locked simultaneously in destroy().
231	"timers": true,
232	// Multiple hchans are acquired in hchan.sortkey() order in
233	// select.
234	"hchan": true,
235	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
236	// syncadjustsudogs().
237	"hchanLeaf": true,
238	// The point of the deadlock lock is to deadlock.
239	"deadlock": true,
240}
241
242func main() {
243	flagO := flag.String("o", "", "write to `file` instead of stdout")
244	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
245	flag.Parse()
246	if flag.NArg() != 0 {
247		fmt.Fprintf(os.Stderr, "too many arguments")
248		os.Exit(2)
249	}
250
251	g, err := dag.Parse(ranks)
252	if err != nil {
253		log.Fatal(err)
254	}
255
256	var out []byte
257	if *flagDot {
258		var b bytes.Buffer
259		g.TransitiveReduction()
260		// Add cyclic edges for visualization.
261		for k := range cyclicRanks {
262			g.AddEdge(k, k)
263		}
264		// Reverse the graph. It's much easier to read this as
265		// a "<" partial order than a ">" partial order. This
266		// ways, locks are acquired from the top going down
267		// and time moves forward over the edges instead of
268		// backward.
269		g.Transpose()
270		generateDot(&b, g)
271		out = b.Bytes()
272	} else {
273		var b bytes.Buffer
274		generateGo(&b, g)
275		out, err = format.Source(b.Bytes())
276		if err != nil {
277			log.Fatal(err)
278		}
279	}
280
281	if *flagO != "" {
282		err = os.WriteFile(*flagO, out, 0666)
283	} else {
284		_, err = os.Stdout.Write(out)
285	}
286	if err != nil {
287		log.Fatal(err)
288	}
289}
290
291func generateGo(w io.Writer, g *dag.Graph) {
292	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
293
294package runtime
295
296type lockRank int
297
298`)
299
300	// Create numeric ranks.
301	topo := g.Topo()
302	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
303		topo[i], topo[j] = topo[j], topo[i]
304	}
305	fmt.Fprintf(w, `
306// Constants representing the ranks of all non-leaf runtime locks, in rank order.
307// Locks with lower rank must be taken before locks with higher rank,
308// in addition to satisfying the partial order in lockPartialOrder.
309// A few ranks allow self-cycles, which are specified in lockPartialOrder.
310const (
311	lockRankUnknown lockRank = iota
312
313`)
314	for _, rank := range topo {
315		if isPseudo(rank) {
316			fmt.Fprintf(w, "\t// %s\n", rank)
317		} else {
318			fmt.Fprintf(w, "\t%s\n", cname(rank))
319		}
320	}
321	fmt.Fprintf(w, `)
322
323// lockRankLeafRank is the rank of lock that does not have a declared rank,
324// and hence is a leaf lock.
325const lockRankLeafRank lockRank = 1000
326`)
327
328	// Create string table.
329	fmt.Fprintf(w, `
330// lockNames gives the names associated with each of the above ranks.
331var lockNames = []string{
332`)
333	for _, rank := range topo {
334		if !isPseudo(rank) {
335			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
336		}
337	}
338	fmt.Fprintf(w, `}
339
340func (rank lockRank) String() string {
341	if rank == 0 {
342		return "UNKNOWN"
343	}
344	if rank == lockRankLeafRank {
345		return "LEAF"
346	}
347	if rank < 0 || int(rank) >= len(lockNames) {
348		return "BAD RANK"
349	}
350	return lockNames[rank]
351}
352`)
353
354	// Create partial order structure.
355	fmt.Fprintf(w, `
356// lockPartialOrder is the transitive closure of the lock rank graph.
357// An entry for rank X lists all of the ranks that can already be held
358// when rank X is acquired.
359//
360// Lock ranks that allow self-cycles list themselves.
361var lockPartialOrder [][]lockRank = [][]lockRank{
362`)
363	for _, rank := range topo {
364		if isPseudo(rank) {
365			continue
366		}
367		list := []string{}
368		for _, before := range g.Edges(rank) {
369			if !isPseudo(before) {
370				list = append(list, cname(before))
371			}
372		}
373		if cyclicRanks[rank] {
374			list = append(list, cname(rank))
375		}
376
377		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
378	}
379	fmt.Fprintf(w, "}\n")
380}
381
382// cname returns the Go const name for the given lock rank label.
383func cname(label string) string {
384	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
385}
386
387func isPseudo(label string) bool {
388	return strings.ToUpper(label) == label
389}
390
391// generateDot emits a Graphviz dot representation of g to w.
392func generateDot(w io.Writer, g *dag.Graph) {
393	fmt.Fprintf(w, "digraph g {\n")
394
395	// Define all nodes.
396	for _, node := range g.Nodes {
397		fmt.Fprintf(w, "%q;\n", node)
398	}
399
400	// Create edges.
401	for _, node := range g.Nodes {
402		for _, to := range g.Edges(node) {
403			fmt.Fprintf(w, "%q -> %q;\n", node, to)
404		}
405	}
406
407	fmt.Fprintf(w, "}\n")
408}
409