// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/testenv"
	"internal/weak"
	"math/bits"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Leak the pointer via t.Logf so escape analysis cannot keep it on
	// the stack; this makes sure new(T) is allocated on the heap.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
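	// debug.SetGCPercent returns the previous setting, so this runs the
	// test with GOGC=1 and restores the original value on return.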
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
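	// MemStats.PauseNs is a circular buffer of recent GC pause times;
	// the most recent entry is at index (NumGC+255)%256.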
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// This test ensures that the compiler can handle these huge types
	// even on the weakest machines. The types are never allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so we set
	// invalidptr=0 to avoid the badPointer check.
	// See issue https://golang.org/issues/49613/
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move;
	// old should not have.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// This was a real failure.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if got&want != want {
		// This is a serious bug - an object is live (due to the KeepAlive
		// call below), but isn't reported as such.
		t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
	}
	if bits.OnesCount64(got&^want) > 1 {
		// Note: we can occasionally have a value that is retained even though
		// it isn't live, due to conservative scanning of stack frames.
		// See issue 67204. For now, we allow a "slop" of 1 unintentionally
		// retained object.
		t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
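	// GOMAXPROCS(0) queries the current setting without changing it.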
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
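	// Run the test with GOMAXPROCS=2; the inner GOMAXPROCS call returns
	// the previous value, which the deferred call restores.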
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

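// logDiff recursively walks got and want and logs every field or element
// where the two values differ.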
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

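// applyGCLoad starts maxProcs-1 background goroutines that keep the GC busy
// by repeatedly building large pointer-heavy trees. It returns a function
// that stops the load and waits for the goroutines to exit.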
func applyGCLoad(b *testing.B) func() {
	// We’ll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Collect latency samples here.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously for b.N iterations
	// and measure the latency of each call.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}
	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything here: it's an average, and the sleep
	// in the b.N loop above skews it significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	slices.Sort(latencies)
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
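	// SetGCPercent(-1) turns automatic GC off; the deferred call restores
	// the previous setting when the test returns.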
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

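// writeBarrierBenchmark runs f while a background goroutine keeps the GC
// running continuously, which keeps the write barrier enabled for the
// duration of the measurement.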
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
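		// Mirror the tree one node per iteration using an explicit stack;
		// tos == -1 means the stack is empty and we restart from the root.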
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

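// countpwg recurses n times to build a deep stack, then signals ready and
// blocks until teardown, keeping all of those stack frames live.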
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}

func TestWeakToStrongMarkTermination(t *testing.T) {
	testenv.MustHaveParallelism(t)

	type T struct {
		a *int
		b int
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	w := make([]weak.Pointer[T], 2048)

	// Make sure there's no outstanding GC from a previous test.
	runtime.GC()

	// Create many objects with weak pointers to them.
	for i := range w {
		x := new(T)
		x.a = new(int)
		w[i] = weak.Make(x)
	}

	// Reset the restart flag.
	runtime.GCMarkDoneResetRestartFlag()

	// Prevent mark termination from completing.
	runtime.SetSpinInGCMarkDone(true)

	// Start a GC, and wait a little bit to get something spinning in mark termination.
	// Simultaneously, fire off another goroutine to disable spinning. If everything's
	// working correctly, then weak.Strong will block, so we need to make sure something
	// prevents the GC from continuing to spin.
	done := make(chan struct{})
	go func() {
		runtime.GC()
		done <- struct{}{}
	}()
	go func() {
		time.Sleep(100 * time.Millisecond)

		// Let mark termination continue.
		runtime.SetSpinInGCMarkDone(false)
	}()
	time.Sleep(10 * time.Millisecond)

	// Perform many weak->strong conversions in the critical window.
	var wg sync.WaitGroup
	for _, wp := range w {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wp.Strong()
		}()
	}

	// Make sure the GC completes.
	<-done

	// Make sure all the weak->strong conversions finish.
	wg.Wait()

	// The bug is triggered if there's still mark work after gcMarkDone stops the world.
	//
	// This can manifest in one of two ways today:
	// - An exceedingly rare crash in mark termination.
	// - gcMarkDone restarts, as if issue #27993 is at play.
	//
	// Check for the latter. This is a fairly controlled environment, so #27993 is very
	// unlikely to happen (it's already rare to begin with) but we'll always _appear_ to
	// trigger the same bug if weak->strong conversions aren't properly coordinated with
	// mark termination.
	if runtime.GCMarkDoneRestarted() {
		t.Errorf("gcMarkDone restarted")
	}
}