Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
// modinfo holds the module info blob. It is populated elsewhere (not
// visible in this file — presumably by the linker; verify); schedinit
// clears it when it is a single placeholder byte.
var modinfo string
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
var (
	m0           m       // the bootstrap M; runtime.main checks it runs on m0
	g0           g       // m0's scheduling goroutine
	mcache0      *mcache // bootstrap memory cache — TODO confirm lifetime (set elsewhere)
	raceprocctx0 uintptr // race detector proc context for m0 (set in schedinit)
	raceFiniLock mutex   // serializes race-detector finalization — assumed; used elsewhere
)
125
126
127
// runtime_inittasks holds the runtime package's init tasks; run by
// runtime.main via doInit before module init tasks.
var runtime_inittasks []*initTask

// mainInitDone is set by runtime.main once all init functions have run.
var mainInitDone atomic.Bool

// mainInitDoneChan is created and then closed by runtime.main after all
// init functions have completed.
var mainInitDoneChan chan bool

// main_main is the program's main.main function. Declaration only; the
// body is linked in from package main (linkname/assembly — not visible here).
func main_main()

// mainStarted reports that runtime.main has started running.
var mainStarted bool

// runtimeInitTime is the nanotime() recorded near the start of runtime.main.
var runtimeInitTime int64

// initSigmask is the signal mask of the main thread, saved in schedinit.
var initSigmask sigset
150
151
// main is the entry goroutine of the runtime: it finishes runtime setup,
// runs all init tasks, calls the user's main.main, and exits the process.
func main() {
	mp := getg().m

	// m0->g0's racectx is used only as the parent of the main goroutine.
	mp.g0.racectx = 0

	// Max stack size: 1 GB on 64-bit, 250 MB on 32-bit.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Hard upper bound used elsewhere — assumed ceiling for stack growth checks.
	maxstackceiling = 2 * maxstacksize

	// Allow the rest of the runtime to observe that main has started.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread, for the
	// duration of initialization (released below once init is done).
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the runtime started; must be non-zero as a sanity check.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // runtime package init first

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()
	defaultGOMAXPROCSUpdateEnable()

	mainInitDoneChan = make(chan bool)
	if iscgo {
		// Sanity-check that all cgo entry points were wired up by the
		// cgo-generated code before using them.
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if GOOS != "windows" {
			if _cgo_thread_start == nil {
				throw("_cgo_thread_start missing")
			}
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from a C-created
		// thread, then notify C that runtime init is done.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the init tasks of all loaded modules. Snapshot the last module
	// first so modules appended concurrently are not traversed.
	last := lastmoduledatap
	for m := &firstmoduledata; true; m = m.next {
		doInit(m.inittasks)
		if m == last {
			break
		}
	}

	// Disable init tracing after main init is done to avoid overhead
	// on goroutines started afterwards.
	inittrace.active = false

	mainInitDone.Store(true)
	close(mainInitDoneChan)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A -buildmode=c-archive or c-shared program: do not run main.main
		// and do not exit; return to the host.
		if GOARCH == "wasm" {
			// On wasm, pause instead of returning; execution resumes when
			// the host calls back in. NOTE(review): the -16 SP adjustment
			// is ABI-specific — confirmed only by this call site.
			pause(sys.GetCallerSP() - 16)
			panic("unreachable")
		}
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()

	// With ASAN, run exit hooks and a leak check before exiting when the
	// program may have live C or library callers.
	exitHooksRun := false
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0)
		exitHooksRun = true
		lsandoleakcheck()
	}

	// Give panicking deferred functions a bounded chance to finish
	// before exiting (up to 1000 yields).
	if runningPanicDefers.Load() != 0 {
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	// If a goroutine is panicking, let it finish printing and crash;
	// park this goroutine forever.
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}
	if raceenabled {
		racefini()
	}

	exit(0)
	// exit should not return; crash hard if it does.
	for {
		var x *int32
		*x = 0
	}
}
339
340
341
342
// os_beforeExit is called from os.Exit via linkname — assumed; runs exit
// hooks and, on a clean exit, race/leak finalization.
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}

	// Mirror the leak-check condition in runtime.main: only on clean exit
	// and only when C/library callers may exist.
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}
354
// init wires the exithook package's runtime hooks so it can yield,
// identify the current goroutine, and abort on error.
func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}
360
// runExitHooks runs any registered exit hooks with the given exit code.
func runExitHooks(code int) {
	exithook.Run(code)
}
364
365
// start forcegc helper goroutine
func init() {
	go forcegchelper()
}
369
// forcegchelper is a dedicated goroutine that sleeps until woken (by
// sysmon — assumed; the waker is not visible here) and then starts a
// time-triggered GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		// Park until someone clears idle and wakes us.
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)

		if debug.gctrace > 0 {
			println("GC forced")
		}

		// Time-triggered, fully concurrent GC.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
388
389
390
391
392
// Gosched yields the processor, allowing other goroutines to run. It does
// not suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
397
398
399
400
401
// goschedguarded yields the processor like Gosched, but the yield is
// performed by goschedguarded_m, which (per its name — confirm in its
// definition) can decline the yield in forbidden states.
func goschedguarded() {
	mcall(goschedguarded_m)
}
405
406
407
408
409
410
// goschedIfBusy yields the processor like Gosched, but only if there are
// no idle Ps or if the goroutine has had its preemption flag set —
// honoring preempt avoids livelock when the scheduler wants this g off
// the P.
func goschedIfBusy() {
	gp := getg()
	// Skip the yield when idle Ps exist, unless preemption was requested.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
// gopark puts the current goroutine into a waiting state and calls unlockf
// on the system stack (via park_m — assumed). If unlockf returns false,
// the goroutine is resumed (behavior implemented in park_m, not visible
// here). reason explains why it is parked; traceReason/traceskip feed the
// execution tracer.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	// Disable preemption while staging the park parameters on the M.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// Switch to g0 to complete the park; can't do anything that might
	// move the G between Ms here.
	mcall(park_m)
}
468
469
470
// goparkunlock puts the current goroutine into a waiting state and
// unlocks lock (via parkunlock_c, run by the park machinery).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
474
475
476
477
478
479
480
481
482
483
484
// goready marks gp runnable on the system stack, placing it in the
// runnext slot of the current P (ready's next=true).
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
490
491
// acquireSudog returns a sudog from the per-P cache, refilling the cache
// from the central list (sched.sudogcache) or allocating a new one when
// both are empty.
func acquireSudog() *sudog {
	// Disable preemption so the P (and its cache) cannot change under us.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// Grab up to half the cache's capacity from the central list.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// Central list was empty too: allocate.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil // drop the reference so the cache doesn't retain it
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem.get() != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
528
529
// releaseSudog returns s to the per-P cache, spilling half of a full
// cache back to the central list first. s must be fully cleared by the
// caller; every residual field is checked and throws.
// NOTE(review): the spill path assumes cap(pp.sudogcache) > 0 (otherwise
// last would be nil at last.next) — presumably guaranteed by P init; confirm.
func releaseSudog(s *sudog) {
	if s.elem.get() != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c.get() != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	// Pin the P while touching its cache.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Cache full: transfer half back to the central list as a
		// singly-linked chain built from the tail of the cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
578
579
// badmcall is called when mcall is (incorrectly) invoked while already
// on the g0 stack.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}
583
// badmcall2 is called if an mcall'd function returns, which must not happen.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}
587
// badreflectcall reports an oversized reflect.call argument frame.
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
591
592
593
// badmorestackg0 handles a stack-growth request on g0, which is fatal.
// Where supported it switches to the dedicated crash stack to print a
// traceback; otherwise it can only emit a fixed message.
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include pc and sp in stack trace
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}
610
611
612
// badmorestackgsignal reports a (fatal) stack-growth request on the
// signal-handling goroutine.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}
616
617
// badctxt reports an unexpectedly non-zero context register — assumed
// called from assembly; only the throw is visible here.
func badctxt() {
	throw("ctxt != 0")
}
621
622
623
// gcrash is a fake g whose stack (allocated in schedinit) is used when
// crashing due to bad stack conditions.
var gcrash g

// crashingG records the g that has claimed the crash stack, if any.
var crashingG atomic.Pointer[g]
627
628
629
630
631
632
633
634
635
// switchToCrashStack switches to the dedicated crash stack and runs fn.
// Only one g may use the crash stack: the first to CAS crashingG wins;
// a re-entry by the same g or a race by another g aborts with a message.
// fn must not return (abort follows as a backstop).
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// Recursive crash while already on the crash stack.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another g holds the crash stack: give it time to finish printing.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
652
653
654
655
656 const crashStackImplemented = GOOS != "windows"
657
658
659 func switchToCrashStack0(fn func())
660
661 func lockedOSThread() bool {
662 gp := getg()
663 return gp.lockedm != 0 && gp.m.lockedg != 0
664 }
665
var (
	// allglock protects allgs. allgs is append-only: gs are never removed
	// (inferred from allgadd below; confirm no remover elsewhere).
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic snapshots of len(allgs) and &allgs[0],
	// maintained by allgadd and read lock-free by atomicAllG. allgptr is
	// updated before allglen, so readers see a length no larger than the
	// array the pointer refers to.
	allglen uintptr
	allgptr **g
)
691
// allgadd appends gp to allgs under allglock and republishes the atomic
// pointer/length pair used by lock-free readers.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	// Publish the (possibly moved) backing array before the new length.
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
705
706
707
708
// allGsSnapshot returns a snapshot of allgs. The world must be stopped or
// allglock held so the slice header is stable.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// The full-capacity slice expression pins cap to len, so a later
	// append to allgs reallocates rather than writing into this snapshot.
	// Entries are append-only, so the contents stay valid.
	return allgs[:len(allgs):len(allgs)]
}
719
720
// atomicAllG returns &allgs[0] and len(allgs) without taking allglock,
// using the atomically-published allgptr/allglen pair.
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}
726
727
// atomicAllGIndex returns ptr[i], i.e. the i'th g in an atomicAllG snapshot.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
731
732
733
734
// forEachG calls fn on every g, holding allglock for the duration so the
// set of gs is stable.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
742
743
744
745
746
747 func forEachGRace(fn func(gp *g)) {
748 ptr, length := atomicAllG()
749 for i := uintptr(0); i < length; i++ {
750 gp := atomicAllGIndex(ptr, i)
751 fn(gp)
752 }
753 return
754 }
755
const (
	// _GoidCacheBatch is the number of goroutine IDs grabbed from the
	// central counter at a time — assumed; the consumer is elsewhere.
	_GoidCacheBatch = 16
)
761
762
763
// cpuinit sets up CPU feature detection (passing env as the GODEBUG value
// for cpu.Initialize) and copies the relevant feature flags into
// runtime-local variables for fast access.
func cpuinit(env string) {
	cpu.Initialize(env)

	switch GOARCH {
	case "386", "amd64":
		x86HasAVX = cpu.X86.HasAVX
		x86HasFMA = cpu.X86.HasFMA
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS

	case "loong64":
		loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
		loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
		loong64HasLSX = cpu.Loong64.HasLSX

	case "riscv64":
		riscv64HasZbb = cpu.RISCV64.HasZbb
	}
}
791
792
793
794
795
796
797 func getGodebugEarly() (string, bool) {
798 const prefix = "GODEBUG="
799 var env string
800 switch GOOS {
801 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
802
803
804
805 n := int32(0)
806 for argv_index(argv, argc+1+n) != nil {
807 n++
808 }
809
810 for i := int32(0); i < n; i++ {
811 p := argv_index(argv, argc+1+i)
812 s := unsafe.String(p, findnull(p))
813
814 if stringslite.HasPrefix(s, prefix) {
815 env = gostringnocopy(p)[len(prefix):]
816 break
817 }
818 }
819 break
820
821 default:
822 return "", false
823 }
824 return env, true
825 }
826
827
828
829
830
831
832
833
834
// schedinit performs the ordered bootstrap of the scheduler and runtime
// subsystems: lock ranks, race detector, GODEBUG parsing, memory
// allocator, module/type tables, signal mask, GC, and finally GOMAXPROCS
// and the initial procresize. The sequence is order-critical; do not
// reorder calls without consulting the respective subsystems.
func schedinit() {
	// Register lock ranks for the deadlock/rank checker.
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()

	// Enforce that this lock is always taken last.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	lockVerifyMSize()

	sched.midle.init(unsafe.Offsetof(m{}.idleNode))

	// The race detector must be initialized before anything races.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))

	// The world starts stopped.
	worldStopped()

	// Parse GODEBUG early where the platform allows reading the raw env.
	godebug, parsedGodebug := getGodebugEarly()
	if parsedGodebug {
		parseRuntimeDebugVars(godebug)
	}
	ticks.init()
	moduledataverify()
	stackinit()
	randinit()
	mallocinit()
	cpuinit(godebug) // must run before alginit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit() // provides activeModules
	typelinksinit()
	itabsinit()
	stkobjinit()

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	if !parsedGodebug {
		// Platform without early env access: parse GODEBUG now that
		// goenvs has populated the environment.
		parseRuntimeDebugVars(gogetenv("GODEBUG"))
	}
	finishDebugVarsSetup()
	gcinit()

	// Allocate the crash stack used by switchToCrashStack.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	mProfStackInit(gp.m)
	defaultGOMAXPROCSInit()

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	var procs int32
	if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
		procs = int32(n)
		sched.customGOMAXPROCS = true
	} else {
		procs = defaultGOMAXPROCS(numCPUStartup)
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}
959
960 func dumpgstatus(gp *g) {
961 thisg := getg()
962 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
963 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
964 }
965
966
// checkmcount throws if the number of live Ms (excluding extra Ms held
// for cgo callbacks) exceeds sched.maxmcount. sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra Ms: both those in use and those idle in the extra
	// list, so cgo-callback threads don't count against the limit.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
984
985
986
987
988
// mReserveID reserves the next M ID, guarding against counter overflow
// and enforcing the thread limit. sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}
1000
1001
// mcommoninit performs the OS-independent part of M initialization: ID
// assignment, random state, signal-stack guard, and publication on allm.
// id < 0 means "allocate a fresh ID".
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for the user (or the GC, presumably), so
	// only record a creation stack for user gs.
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mp.self = newMWeakPointer(mp)

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	// NumCgoCall and others iterate over allm w/o schedlock,
	// so the publish must be atomic.
	mp.alllink = allm
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}
1042
1043
1044
1045
1046
// mProfStackInit allocates the M's profiling stack buffers, unless
// profiling stacks are disabled via GODEBUG profstackdepth=0.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// debug.profstack is set to its default value of 32 if
		// GODEBUG=profstackdepth=0 — assumed; zero depth means "no buffers".
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}
1056
1057
1058
1059
// makeProfStackFP creates a buffer large enough to hold a frame-pointer
// unwound stack: profstackdepth frames, plus maxSkip skippable frames,
// plus one slot for the terminating sentinel — inferred from the
// 1+maxSkip+depth arithmetic; confirm against the unwinder.
func makeProfStackFP() []uintptr {
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}
1069
1070
1071
// makeProfStack returns a buffer large enough to hold a maximum-sized profile stack.
func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1073
1074
// pprof_makeProfStack exposes makeProfStack to the pprof package — assumed linkname.
func pprof_makeProfStack() []uintptr { return makeProfStack() }
1076
// becomeSpinning marks mp as spinning (searching for work), bumping the
// global spinning count and clearing the need-spinning request.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
1082
1083
1084
1085
1086
1087
1088
1089
// snapshotAllp stores the current allp slice header on mp and returns it,
// keeping the backing array reachable for racy readers — presumably so
// it survives a concurrent procresize; confirm against allp's owner.
func (mp *m) snapshotAllp() []*p {
	mp.allpSnapshot = allp
	return mp.allpSnapshot
}
1094
1095
1096
1097
1098
1099
1100
// clearAllpSnapshot drops the snapshot taken by snapshotAllp so the
// backing array can be collected.
func (mp *m) clearAllpSnapshot() {
	mp.allpSnapshot = nil
}
1104
// hasCgoOnStack reports whether mp has active cgo calls or is an extra M
// created for C-to-Go callbacks.
func (mp *m) hasCgoOnStack() bool {
	return mp.ncgo > 0 || mp.isextra
}
1108
const (
	// osHasLowResTimer indicates the OS sleep/wait primitives have low
	// resolution (notably affecting timer precision on these systems).
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock in integer form, usable in
	// constant arithmetic.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates timestamps (e.g. nanotime) have low
	// resolution on this OS.
	osHasLowResClock = osHasLowResClockInt > 0
)
1122
1123
// ready marks gp (which must be in _Gwaiting, possibly with the scan bit)
// runnable, enqueues it on the current P (in runnext if next is true),
// and wakes a P if one is needed.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable. Disable preemption so the P cannot change underneath.
	mp := acquirem()
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting; make Grunnable and put on runq.
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}
1145
1146
1147
// freezeStopWait is the sentinel stopwait value meaning "stop everything,
// forever" (used by freezetheworld).
const freezeStopWait = 0x7fffffff

// freezing is set when the runtime is freezing the world in preparation
// for a fatal traceback.
var freezing atomic.Bool
1153
1154
1155
1156
// freezetheworld is a best-effort stop-the-world used when crashing: it
// repeatedly requests preemption of all goroutines so the traceback sees
// as quiescent a state as possible. It takes no locks (we may be crashing
// while holding them).
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// Leave goroutines running for the traceback; just give running
		// goroutines a brief moment to reach a safer state.
		usleep(1000)
		return
	}

	// Several rounds: setting stopwait/gcwaiting and preempting races
	// with concurrent scheduling, so retry until preemptall finds nothing
	// left to preempt.
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}
1206
1207
1208
1209
1210
// readgstatus atomically loads gp's status word (which may include the
// _Gscan bit).
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
1214
1215
1216
1217
1218
// casfrom_Gscanstatus transitions gp out of a _Gscan* state to the
// corresponding non-scan state (newval must equal oldval minus the scan
// bit), throwing on any mismatch, and releases the Gscan lock rank.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that the transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanleaked,
		_Gscanpreempted,
		_Gscandeadextra:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRankAndM(lockRankGscan)
}
1246
1247
1248
// castogscanstatus attempts to set the _Gscan bit on gp (newval must be
// oldval with the scan bit added). On success it acquires the Gscan lock
// rank and returns true; on CAS failure it returns false; on an invalid
// oldval it throws.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gleaked,
		_Gsyscall,
		_Gdeadextra:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("bad oldval passed to castogscanstatus")
	return false
}
1270
1271
1272
// casgstatusAlwaysTrack forces scheduling-latency tracking on every
// transition instead of sampling every gTrackingPeriod'th (see casgstatus).
var casgstatusAlwaysTrack = false
1274
1275
1276
1277
1278
1279
1280
// casgstatus transitions gp from oldval to newval, spinning until the CAS
// succeeds (the status may be temporarily held in a _Gscan state by a
// scanner). Scan-bit transitions must use castogscanstatus /
// casfrom_Gscanstatus instead. It also samples scheduling-latency and
// mutex-wait statistics on selected transitions.
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from
			// counting against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// Loop if gp->atomicstatus is in a scan state: the scanner owns it
	// and will eventually restore oldval, letting our CAS succeed.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// Call on the systemstack to prevent throw from counting
				// against the nosplit stack reservation.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			// Busy-spin briefly before escalating to an OS yield.
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if gp.bubble != nil {
		systemstack(func() {
			gp.bubble.changegstatus(gp, oldval, newval)
		})
	}

	if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
		// Track every gTrackingPeriod'th time a goroutine transitions out
		// of running, to amortize sampling cost.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking: note that loads of gp.waitreason
	// below are safe because this g is being tracked by the caller —
	// assumed; confirm against callers' synchronization.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how long we were in
		// the runnable state (scheduling latency).
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a runtime mutex: nothing to measure.
			break
		}
		// Blocking on a runtime mutex: account the wait time, scaled up
		// by gTrackingPeriod to undo the sampling rate.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a runtime mutex: nothing to time.
			break
		}
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just became runnable: start the scheduling-latency clock.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're now running; stop tracking and record the latency sample.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
1388
1389
1390
1391
// casGToWaiting transitions gp from old to _Gwaiting, setting the wait
// reason first (the reason must be set before the status change so
// observers of _Gwaiting see a valid reason).
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}
1397
1398
1399
1400
1401
1402
1403
1404
// casGToWaitingForSuspendG transitions gp to _Gwaiting like casGToWaiting,
// but insists the reason is one of the "waiting for suspendG" reasons —
// presumably so the suspend machinery can identify such waiters; the
// consumer is not visible here.
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForSuspendG() {
		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
	}
	casGToWaiting(gp, old, reason)
}
1411
1412
1413
1414
1415
// casGToPreemptScan transitions gp from _Grunning directly to
// _Gscan|_Gpreempted, spinning until the CAS succeeds, and takes the
// Gscan lock rank.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
	// NOTE(review): no bubble.changegstatus here, unlike casgstatus and
	// casGFromPreempted — presumably handled by the eventual transition
	// out of the scan state; confirm.
}
1430
1431
1432
1433
// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting (wait reason "preempted"), reporting whether the CAS won; a
// false return means someone else changed the status first.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
		return false
	}
	if bubble := gp.bubble; bubble != nil {
		bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
	}
	return true
}
1447
1448
// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world. Keep in sync with stwReasonStrings below:
// each constant indexes its display string.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)
1473
// String returns the human-readable name of the stop-the-world reason.
func (r stwReason) String() string {
	return stwReasonStrings[r]
}
1477
1478 func (r stwReason) isGC() bool {
1479 return r == stwGCMarkTerm || r == stwGCSweepTerm
1480 }
1481
1482
1483
1484
// stwReasonStrings maps each stwReason to its display string; indexed by
// the constants above and must stay in sync with them.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
1504
1505
1506
// worldStop provides context about a world-stop operation: why, and the
// timing of the stop (all times from nanotime).
type worldStop struct {
	reason           stwReason
	startedStopping  int64 // when the stop began
	finishedStopping int64 // when all Ps were stopped
	stoppingCPUTime  int64 // sum over Ps of time spent stopping
}

// stopTheWorldContext carries the worldStop from the systemstack closure
// in stopTheWorld back to its caller (temporary variable; single writer
// because worldsema is held).
var stopTheWorldContext worldStop
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
// stopTheWorld stops all Ps from executing goroutines, blocking until it
// succeeds, and returns the stop context. The caller must eventually call
// startTheWorld with that context. Holds worldsema across the stop;
// preemption is disabled via preemptoff for the duration.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// Run on the system stack; stopTheWorldWithSema also changes this
		// goroutine's status, which must not race with a stack scan.
		stopTheWorldContext = stopTheWorldWithSema(reason)
	})
	return stopTheWorldContext
}
1545
1546
1547
1548
// startTheWorld undoes the effects of stopTheWorld: restarts Ps, then
// re-enables preemption and releases worldsema.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// Release worldsema before re-enabling preemption (under acquirem so
	// this M isn't preempted between the two), so that a preempting G
	// acquiring worldsema doesn't get unfairly favored — ordering
	// inferred from the acquirem/releasem bracket; confirm rationale.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
1572
1573
1574
1575
// stopTheWorldGC is stopTheWorld plus holding gcsema, which additionally
// blocks a concurrent GC from starting while the world is stopped.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}
1580
1581
1582
1583
// startTheWorldGC undoes stopTheWorldGC: restarts the world, then
// releases gcsema.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}
1588
1589
// worldsema grants the holder the right to stop the world (binary
// semaphore, initially available).
var worldsema uint32 = 1

// gcsema serializes GC cycles against other world-stopping operations
// (binary semaphore, initially available); held by stopTheWorldGC and,
// presumably, by the GC cycle itself — the GC-side acquire is elsewhere.
var gcsema uint32 = 1
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
// stopTheWorldWithSema is the core stop-the-world. The caller holds
// worldsema and is on the system stack with no locks held. It marks all
// Ps as stopped — its own directly, syscall Ps via setBlockOnExitSyscall,
// idle Ps from the idle list — and preempts the rest, then waits on
// stopnote until every P has parked.
func stopTheWorldWithSema(reason stwReason) worldStop {
	// Mark the goroutine which called stopTheWorld as waiting: its stack
	// may be scanned while it waits here.
	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)

	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime()
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()

	// Stop this P directly.
	gp.m.p.ptr().status = _Pgcstop
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--

	// Ask each P in a syscall to stop itself on syscall exit.
	for _, pp := range allp {
		if thread, ok := setBlockOnExitSyscall(pp); ok {
			thread.gcstopP()
			thread.resume()
		}
	}

	// Stop idle Ps by draining the idle list.
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// Wait for the remaining Ps to stop voluntarily, re-preempting
	// periodically in case a request was missed.
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check that all Ps really stopped and accumulate per-P
	// stopping CPU time.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking; deadlock here (double lock of
		// the deadlock mutex) rather than racing on the throw.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	// Switch back to _Grunning; the world is stopped so nothing scans us now.
	casgstatus(getg().m.curg, _Gwaiting, _Grunning)

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
1757
1758
1759
1760
1761
1762
1763
// startTheWorldWithSema restarts the stopped world: re-injects netpoll
// work, resizes Ps (honoring a pending newprocs), hands Ps back to
// waiting Ms (or starts new ones), records STW metrics, and wakes an
// extra P if there is work. now is the current time, or 0 to read it
// here; the (possibly updated) time is returned.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each P with local work to an M: either wake the M already
	// bound to it or start a fresh one.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable
	// goroutines in local queues or in the global queue. If we don't, the
	// proc will park itself. If we have lots of excessive work, resetspinning
	// will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}
1832
1833
1834
1835 func usesLibcall() bool {
1836 switch GOOS {
1837 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1838 return true
1839 }
1840 return false
1841 }
1842
1843
1844
1845 func mStackIsSystemAllocated() bool {
1846 switch GOOS {
1847 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1848 return true
1849 }
1850 return false
1851 }
1852
1853
1854
// mstart is the entry point for new Ms. Implemented in assembly;
// declaration only — it sets up and calls mstart0 (assumed; see below).
func mstart()
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
// mstart0 is the Go entry point for new Ms: it establishes g0's stack
// bounds and guards, runs mstart1 (which never returns normally for the
// scheduler path), and finally exits the thread via mexit.
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack. Cgo may have left
		// stack size in stack.hi; minit may update the bounds later.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		// Use the address of the local `size` as an approximation of the
		// current stack top; leave 1024 bytes of slack below the presumed
		// base.
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}

	// Initialize stack guard so that we can start calling regular Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack functions,
	// which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// Systems where the OS allocated the stack: tell mexit not to
		// free it.
		osStack = true
	}
	mexit(osStack)
}
1903
1904
1905
1906
1907
// mstart1 completes per-M setup on g0 — saving a re-entry point into
// g0.sched, running OS/arch init, the M's start function, and (for
// non-m0) acquiring its designated P — then enters the scheduler loop.
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just after the mstart1
	// call, for use by goexit0 and mcall — assumed: the saved pc/sp are
	// the caller's (mstart0), so "returning" here unwinds into mstart0's
	// thread-exit path.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = sys.GetCallerPC()
	gp.sched.sp = sys.GetCallerSP()

	asminit()
	minit()

	// Install signal handlers; after minit so the thread can handle
	// signals; only once, on m0.
	if gp.m == &m0 {
		mstartm0()
	}

	if debug.dataindependenttiming == 1 {
		sys.EnableDIT()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	// m0 already has its P (from bootstrap); every other M picks up the
	// P that was handed to it in nextp.
	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
1948
1949
1950
1951
1952
1953
1954
// mstartm0 performs startup steps that must run on m0 only, after
// minit: creating the first extra M for non-Go-thread callbacks and
// installing signal handlers.
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows even when cgo is not in use,
	// for callbacks created by syscall.NewCallback-style mechanisms.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
1965
1966
1967
1968
// mPark parks the calling M until another thread wakes it via
// notewakeup(&m.park). The note is cleared again on wakeup so the M
// can park repeatedly.
func mPark() {
	gp := getg()

	// Scrub secret data from the signal stack before sleeping so a
	// parked thread does not keep sensitive bytes alive.
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
// mexit tears down and exits the current thread.
//
// osStack reports whether this M's g0 stack is OS-allocated and
// therefore must not be freed by the runtime. When the stack is
// runtime-allocated, the thread exits via exitThread, which signals
// (through m.freeWait) when the stack is no longer in use so allocm
// can free it later.
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// m0 cannot exit: much of the runtime assumes it exists.
		// Instead release its P, account it as freed for deadlock
		// detection, and park it forever.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack if the runtime allocated it.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		if valgrindenabled {
			valgrindDeregisterStack(mp.gsignal.valgrindStackID)
			mp.gsignal.valgrindStackID = 0
		}
		// Clear gsignal so that a late signal on this thread cannot
		// observe (and run on) the freed stack.
		mp.gsignal = nil
	}

	// Release vgetrandom-related per-M state.
	vgetrandomDestroy(mp)

	// Clear the M's self handle so nothing resolves to this M once it
	// is off the lists below.
	mp.self.clear()

	// Remove this M from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Queue the M on sched.freem so allocm can later reclaim its g0
	// stack and trace resources once the thread has truly exited
	// (freeWait is the handshake flag).
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	// Fold this M's counters into the global totals.
	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P so its work continues on another M.
	handoffp(releasep())

	// Account the freed M and run the deadlock detector, since this M
	// is no longer available to run goroutines.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Keep pendingPreemptSignals consistent if a preemption signal
		// was still in flight for this thread.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy the remaining OS-level resources of this M.
	mdestroy(mp)

	if osStack {
		// No runtime-allocated g0 stack: mark the M immediately safe
		// for cleanup and return, which exits the thread.
		mp.freeWait.Store(freeMRef)
		return
	}

	// The g0 stack is runtime-allocated: exit the thread through
	// exitThread, which clears freeWait when the stack is no longer in
	// use so allocm can free it.
	exitThread(&mp.freeWait)
}
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
// forEachP calls fn(p) for every P at a GC-safe point, delegating to
// forEachPInternal. It runs on the system stack and parks the calling
// user goroutine in a waiting state (with the given reason) for the
// duration, so the goroutine can be safely suspended/scanned.
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg

		// Mark the user goroutine as waiting while we operate on the
		// system stack, then restore it to running afterwards.
		casGToWaitingForSuspendG(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
// forEachPInternal calls fn(p) for every P at a GC safe point: it runs
// fn directly for the current P and for idle Ps, and arranges (via
// p.runSafePointFn plus preemption) for all other Ps to run fn at
// their next safe point. It does not return until every P has run fn.
// fn is called without any scheduler locks held.
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask every other P to run the safe point function when it next
	// reaches a safe point, and preempt them to hurry that along.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Run fn directly for idle Ps: they cannot reach a safe point on
	// their own. The CAS resolves the race with a P leaving the idle
	// list concurrently — whichever side wins the CAS runs fn.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(pp)

	// Force Ps whose goroutine is blocked in a system call (and so
	// cannot run fn themselves) to give up their P; handoffp takes
	// care of the pending safe point function.
	for _, p2 := range allp {
		if atomic.Load(&p2.runSafePointFn) != 1 {
			// fn already ran (or is being run) for this P.
			continue
		}
		if thread, ok := setBlockOnExitSyscall(p2); ok {
			thread.takeP()
			thread.resume()
			handoffp(p2)
		}
	}

	// Wait for the remaining Ps to reach a safe point and run fn.
	if wait {
		for {
			// Sleep with a timeout and re-preempt: a preemption request
			// may have been missed (e.g. delivered while the target was
			// momentarily non-preemptible).
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
// runSafePointFn runs the pending safe point function (installed by
// forEachP) for the current P, if one is requested, and notifies the
// waiter when the last P has run it.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race with forEachP, which may run the function for
	// this P itself (e.g. while the P was idle): only the winner of the
	// CAS on runSafePointFn actually runs it.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
2257
2258
2259
2260
// cgoThreadStart holds a pointer related to C-side thread creation.
// NOTE(review): not referenced in this view — presumably set by cgo
// setup code elsewhere; confirm its users before changing.
var cgoThreadStart unsafe.Pointer
2262
// cgothreadstart is the argument passed to _cgo_thread_start when
// starting a new thread via the C runtime (see newm1). Its layout must
// match what the C side expects.
type cgothreadstart struct {
	g guintptr // the new thread's g0
	tls *uint64 // pointer to the M's TLS slot
	fn unsafe.Pointer // thread entry point (mstart)
}
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
// allocm allocates a new m not yet associated with any OS thread, with
// the given startup function fn and M id (or -1 to allocate one).
//
// pp may be nil; if the caller currently has no P, pp is temporarily
// acquired because allocation below may require one. As a side effect,
// allocm also reclaims Ms queued on sched.freem by mexit.
func allocm(pp *p, fn func(), id int64) *m {
	// Hold allocmLock for read so M creation can be globally suspended
	// by write-lockers.
	allocmLock.rlock()

	// Disable preemption while we may be holding a borrowed P.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporarily borrow pp for the allocations below
	}

	// Reclaim exited Ms: free their g0 stacks and trace state once
	// their threads have signalled (via freeWait) that the stack is no
	// longer in use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				// Thread still exiting; keep it for a later pass.
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// The thread is gone; release its trace resources.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// freeMStack means the g0 stack was runtime-allocated and
			// must be freed here; freeMRef means there is nothing to free.
			if wait == freeMStack {
				// stackfree must run on the system stack so it cannot
				// trigger a stack growth of our own stack mid-free.
				systemstack(func() {
					stackfree(freem.g0.stack)
					if valgrindenabled {
						valgrindDeregisterStack(freem.g0.valgrindStackID)
						freem.g0.valgrindStackID = 0
					}
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := &new(mPadded).m
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// With cgo, or on OSes whose M stacks are system allocated, the
	// kernel provides g0's stack; otherwise allocate one ourselves.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	// Release pp if it is the P we borrowed above (it is then our
	// current P).
	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
// needm is called when Go code must run on a thread the runtime does
// not own (a cgo or Windows callback thread). It claims an M from the
// extra-M list and installs it on this thread. signal indicates the
// call is from within a signal handler, which restricts what is safe
// to do (e.g. no tracing).
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// C code called into Go before any cgo call had a chance to
		// create the extra Ms. There is no Go stack to panic on, so
		// print a fatal message directly and exit.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save the thread's signal mask and block signals: until minit runs
	// below, this thread cannot safely handle signals. The save must
	// happen before g is installed.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// Claim an extra M (getExtraM spins until one is available).
	mp, last := getExtraM()

	// If this was the last extra M, ask the Go code we are about to run
	// to allocate a replacement so future callbacks don't stall.
	mp.needextram = last

	// Store the original signal mask for use by unminit/dropm.
	mp.sigmask = sigmask

	// Set up thread-local storage as this platform requires.
	osSetupTLS(mp)

	// Install g (= m.g0) and adopt the current OS stack as g0's stack.
	setg(mp.g0)
	sp := sys.GetCallerSP()
	callbackUpdateSystemStack(mp, sp, signal)

	// The thread is now running Go, not C.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Tracing is not signal-safe; only emit the syscall-creation event
	// for non-signal entries.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg becomes a live goroutine (in a syscall from Go's point of
	// view) and stops counting as a parked system goroutine.
	casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
	sched.ngsys.Add(-1)

	// Account this M as a goroutine in a syscall without a P.
	addGSyscallNoP(mp)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
2486
2487
2488
2489
// needAndBindM obtains an M via needm and, if the C pthread key has
// been created, binds that M to the current C thread so subsequent
// callbacks on this thread reuse it (see cgoBindM).
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}
2497
2498
2499
2500
// newextram allocates extra Ms for callbacks from non-Go threads: one
// per waiter recorded by lockextra, or a single M if the extra list is
// simply empty.
func newextram() {
	c := extraMWaiters.Swap(0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else if extraMLength.Load() == 0 {
		// Make sure there is at least one extra M.
		oneNewExtraM()
	}
}
2512
2513
// oneNewExtraM allocates one M and an associated goroutine and pushes
// them onto the extra-M list, ready to be claimed by needm.
func oneNewExtraM() {
	// The goroutine's scheduling state is set up so it appears to have
	// just made a system call from inside a function that returns into
	// goexit: when a real callback runs and returns, it unwinds through
	// goexit like a normal goroutine exit.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // leave headroom below the top of stack
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// _Gdeadextra marks a goroutine parked on the extra-M list: dead as
	// far as the GC is concerned, but reserved for callbacks.
	casgstatus(gp, _Gidle, _Gdeadextra)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// Assume the thread is running C code until needm says otherwise.
	mp.isExtraInC = true
	// Lock the M and g together permanently.
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// Put the g on allgs so the GC and debuggers can find it.
	allgadd(gp)

	// While parked on the extra list, the goroutine counts as a system
	// goroutine so it does not affect user goroutine accounting.
	sched.ngsys.Add(1)

	// Add m to the extra-M list.
	addExtraM(mp)
}
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
// dropm is the inverse of needm: it detaches the current extra M from
// this thread and returns it to the extra-M list, leaving the thread
// free to keep running C code (or exit) without a Go M.
func dropm() {
	// Clear m and g, and return m to the extra list.
	mp := getg().m

	// Tracing is not signal-safe; skip trace events for Ms that were
	// claimed inside a signal handler.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Park mp.curg back into dead-extra state; it again counts as a
	// system goroutine while on the extra list.
	casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)
	decGSyscallNoP(mp)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Undo the syscall-tick bookkeeping implied by the destroy event so
	// a future needm on this M looks consistent to the tracer.
	mp.syscalltick--

	// Reset per-goroutine trace state so the g can be reused cleanly by
	// the next callback.
	mp.curg.trace.reset()

	// Flush this M's trace resources while the tracer is active; thread
	// destruction requires sched.lock.
	if traceEnabled() || traceShuttingDown() {
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit and only restore the original mask
	// afterwards, so no signal arrives while the thread has no valid
	// signal-handling state.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0's stack bounds: the OS stack this M borrowed may be gone
	// (or different) by the time the M is reused.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0
	mp.g0StackAccurate = false

	putExtraM(mp)

	msigrestore(sigmask)
}
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
// cgoBindM hands g0 of the current M to the C side (via _cgo_bindm) so
// the C thread can stash it in its pthread-specific slot and reuse
// this M for future callbacks. Must run on g0; not supported on
// windows/plan9 (no pthreads there).
func cgoBindM() {
	if GOOS == "windows" || GOOS == "plan9" {
		fatal("bindm in unexpected GOOS")
	}
	g := getg()
	if g.m.g0 != g {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(g))
	}
}
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
// getm returns the current M as an opaque uintptr handle.
// NOTE(review): has no callers in this view — presumably exposed via
// linkname for external use; confirm before changing.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}
2727
var (
	// extraM is the head of the list of extra Ms (linked through
	// m.schedlink), or the sentinel value 1 while the list is locked.
	// See lockextra/unlockextra.
	extraM atomic.Uintptr
	// extraMLength is the number of Ms currently on the extra list.
	extraMLength atomic.Uint32
	// extraMWaiters counts callers of lockextra that found the list
	// empty and are waiting; newextram consumes this count.
	extraMWaiters atomic.Uint32

	// extraMInUse counts extra Ms currently claimed by needm.
	extraMInUse atomic.Uint32
)
2744
2745
2746
2747
2748
2749
2750
2751
// lockextra locks the extra-M list and returns its head. The "lock" is
// the list word itself: the sentinel value 1 means locked.
//
// If nilokay is false the caller requires a non-nil M, so lockextra
// registers itself in extraMWaiters (once) — letting newextram know to
// create an M — and spins until one appears. Runs without a g, hence
// the _no_g sleep/yield primitives.
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Another thread holds the list; yield and retry.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Register as a waiter exactly once per caller so
				// newextram creates the right number of Ms.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
2780
2781
// unlockextra unlocks the extra-M list, installing mp as the new head
// and adjusting the recorded list length by delta.
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
2786
2787
2788
2789
2790
2791
2792
2793
// getExtraM pops an M off the extra list, blocking until one is
// available, and reports whether the list is now empty.
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}
2800
2801
2802
2803
2804
// putExtraM returns an M previously claimed by getExtraM to the list.
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
2809
2810
2811
2812
// addExtraM pushes an M onto the extra list.
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
2818
var (
	// allocmLock is read-locked while creating Ms (allocm); taking it
	// for write blocks all M creation.
	allocmLock rwmutex

	// execLock is read-locked around OS thread creation (see newm1).
	// NOTE(review): the write-lock sites are outside this view —
	// presumably exec-related code that must not race with thread
	// creation; confirm there before changing.
	execLock rwmutex
)
2830
2831
2832
// Messages written directly (no allocation, no panic machinery) when
// OS thread or thread-stack creation fails.
const (
	failthreadcreate = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
2837
2838
2839
2840
// newmHandoff queues M-creation requests for the template thread:
// callers that cannot safely create OS threads themselves (see newm)
// push pre-allocated Ms here, and templateThread starts them.
var newmHandoff struct {
	lock mutex

	// newm is a list of Ms that need new OS threads, linked through
	// m.schedlink. Guarded by lock.
	newm muintptr

	// waiting is true when the template thread is parked on wake and
	// needs a notewakeup when work is queued.
	waiting bool
	wake note

	// haveTemplateThread is nonzero once the template thread has been
	// started (or is starting); set via CAS in startTemplateThread.
	haveTemplateThread uint32
}
2858
2859
2860
2861
2862
2863
2864
2865
// newm creates a new M (and its OS thread). The M will start by
// running fn, with pp as its designated P (may be nil). id is a
// pre-reserved M id, or -1 to allocate one.
func newm(fn func(), pp *p, id int64) {
	// Disable preemption: allocm returns an M that is not yet on allm,
	// so if a stop-the-world happened here the new M could be invisible
	// to (and thus skip) the STW handshake.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We are on a thread that is locked to a goroutine or was
		// started by C: its kernel state may be unusual, which would be
		// inherited by a thread created here. Hand the request off to
		// the template thread, which runs in a known-good state.
		// (plan9 is excluded from this handoff path.)
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The M has not started yet, but the template thread does not
		// depend on this M to make progress, so re-enabling preemption
		// here is safe.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
2914
// newm1 actually starts the OS thread for mp: through the C runtime
// (_cgo_thread_start) when cgo is in use, so the thread gets proper
// C-side initialization, or via newosproc otherwise.
func newm1(mp *m) {
	if iscgo && _cgo_thread_start != nil {
		var ts cgothreadstart
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		// Tell the sanitizers the C side will read ts.
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		// Hold execLock for read so thread creation cannot overlap the
		// write-locked sections elsewhere.
		execLock.rlock()
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock()
	newosproc(mp)
	execLock.runlock()
}
2936
2937
2938
2939
2940
// startTemplateThread starts the template thread if it is not already
// running. Safe to call repeatedly: only the first caller wins the CAS
// and creates the thread. The calling thread itself must be in a good
// state to create new threads.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm
		return
	}

	// Disable preemption so the newly started template thread cannot
	// observe haveTemplateThread set while creation is still underway.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
// templateThread runs forever on an M without a P, in a known-good
// thread state, creating OS threads on behalf of callers that cannot
// safely do so themselves (see the handoff path in newm).
func templateThread() {
	// Count as a system M for deadlock detection.
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			// Start every queued M. Drop the lock while doing so: newm1
			// may block in thread creation.
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		// No more work; park until the next request arrives.
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
2995
2996
2997
// stopm puts the current M on the idle-M list and parks it until some
// other M wakes it with a P to run (delivered through m.nextp).
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	// Woken with a P to run: take it.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3018
// mspinning is used as the mstartfn of Ms created by startm for
// spinning work: the new M begins life in the spinning state.
func mspinning() {
	// startm's caller incremented nmspinning on our behalf.
	getg().m.spinning = true
}
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
// startm schedules some M to run P pp, creating an M if none is idle.
// If pp is nil, startm tries to take an idle P and does nothing if
// none is available. spinning means the woken/created M enters the
// scheduler spinning (the caller must have incremented nmspinning
// beforehand; the count is handed to the new M). lockheld indicates
// the caller already holds sched.lock.
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption: from here until ownership of pp is handed to
	// the target M, pp is in a limbo state (not idle, not running),
	// and a stop-the-world during that window would stall waiting on it.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// Callers of startm(spinning=true) must provide a P.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No idle Ms; create one. newm must not be called with
		// sched.lock held, so reserve the M id first and drop the lock.
		// The exclusive ownership of pp here is what keeps it from
		// being taken by anyone else in the meantime.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The caller incremented nmspinning; the new M starts spinning.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership of pp transferred to the new M; preemption is now safe.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning; hand the spinning state to nmp.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership of pp transferred to nmp; preemption is now safe.
	releasem(mp)
}
3132
3133
3134
3135
3136
// handoffp hands off ownership of a P from a blocked or locked M so
// its work can proceed on some other M. It must start an M in any
// situation where findRunnable would return a goroutine to run on pp.
// Always called without a P.
func handoffp(pp *p) {
	// If the P has local or global work, start an M to run it.
	if !runqempty(pp) || !sched.runq.empty() {
		startm(pp, false, false)
		return
	}
	// If there is trace work to be read, start an M for it.
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// If the P has GC mark work, start an M for it.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
		startm(pp, false, false)
		return
	}
	// No local work: if nothing is spinning or idle, become the system's
	// one spinning M so that stealable work elsewhere gets discovered.
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// A stop-the-world is in progress; stop this P directly and
		// notify the stopper when the last P has stopped.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run a pending safe-point function on this P's behalf (see forEachP).
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	// Global work may have appeared since the check above.
	if !sched.runq.empty() {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this would be the last running P and nobody is polling the
	// network, start an M so the netpoller keeps getting serviced.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// Read the earliest timer before idling the P; wakeNetPoller is
	// called after dropping sched.lock since it may wake other Ms.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
// wakep wakes an idle P (paired with a spinning M) to look for work,
// but only if no M is spinning already — an existing spinner will find
// the work on its own. This keeps the wake-up policy conservative.
func wakep() {
	// Only one waker at a time: bail if anyone is spinning or if we
	// lose the race to become the first spinner.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of the P is transferred to the
	// next M inside startm; otherwise the P could be stranded in a
	// transient state across a preemption.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// No idle P: undo the optimistic spinning increment above.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}

	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
3253
3254
3255
// stoplockedm parks the current M, which is locked to a goroutine,
// until that goroutine becomes runnable again (another M wakes us via
// startlockedm). The M's P is handed off in the meantime.
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this P while we sleep.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules our locked g again.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	// Take the P that the waker handed us.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3279
3280
3281
3282
3283
// startlockedm wakes the M that gp is locked to, handing it the
// current P, and then stops the current M. The locked M must not be
// this one.
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// Hand off our P directly to the locked M and wake it.
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}
3299
3300
3301
// gcstopm stops the current M for a stop-the-world: it drops any
// spinning state, parks its P in _Pgcstop, notifies the stopper when
// the last P stops, and parks the M itself.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// This M is stopping rather than searching for work, so the
		// lost-wakeup concerns of the spinning protocol do not apply;
		// just decrement the count.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
// execute runs gp on the current M and P. inheritTime is true if gp
// inherits the remainder of the current time slice (in which case
// schedtick is not advanced). Never returns: control jumps into gp via
// gogo.
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure gp's stack is recorded in the active goroutine
		// profile before it starts running (afterwards its stack would
		// be changing under the profiler).
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Assign gp.m before entering _Grunning so that running goroutines
	// always have an M.
	mp.curg = gp
	gp.m = mp
	gp.syncSafePoint = false
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Sync the thread's data-independent-timing (DIT) mode with what
	// this goroutine wants, unless DIT is forced on globally by debug.
	if sys.DITSupported && debug.dataindependenttiming != 1 {
		if gp.ditWanted && !mp.ditEnabled {
			// Goroutine wants DIT but the thread has it off.
			sys.EnableDIT()
			mp.ditEnabled = true
		} else if !gp.ditWanted && mp.ditEnabled {
			// Thread has DIT on but the goroutine doesn't want it.
			sys.DisableDIT()
			mp.ditEnabled = false
		}
	}

	// Check whether the CPU profiler needs to be turned on or off for
	// this thread.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
3390
3391
3392
3393
3394
// findRunnable finds a runnable goroutine to execute, blocking if
// necessary. It tries, roughly in order: the trace reader, GC mark
// workers, the global queue (occasionally, for fairness), the local
// run queue, the global queue, netpoll, work stealing, and idle GC
// work. If nothing is found it releases the P, parks the M, and
// retries when woken. tryWakeP indicates that the returned goroutine
// is special (trace reader or GC worker) and the caller should call
// wakep so ordinary work is not starved.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

top:
	// Drop any allp snapshot from a previous iteration: allp may have
	// changed, and the snapshot is only needed briefly below.
	mp.clearAllpSnapshot()

	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later, which may
	// steal timers. It's important that between now and then, nothing
	// blocks, so these numbers remain mostly relevant.
	now, pollUntil, _ := pp.timers.check(0, nil)

	// Try the trace reader first; it is special.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try a scheduled GC mark worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while for fairness:
	// otherwise goroutines that keep readying each other could occupy
	// the local run queue forever.
	if pp.schedtick%61 == 0 && !sched.runq.empty() {
		lock(&sched.lock)
		gp := globrunqget()
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake up the finalizer goroutine if it is waiting and has work.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}

	// Wake cleanup goroutines if needed.
	if gcCleanups.needsWake() {
		gcCleanups.wake()
	}

	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// Local run queue.
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// Global run queue: take a batch into the local queue.
	if !sched.runq.empty() {
		lock(&sched.lock)
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp != nil {
			if runqputbatch(pp, &q); !q.empty() {
				throw("Couldn't put Gs into empty local runq")
			}
			return gp, false, false
		}
	}

	// Non-blocking network poll, if anyone is waiting on the network,
	// a poller exists (lastpoll != 0), and no other P is already doing
	// this optimistic poll (pollingNet).
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
		list, delta := netpoll(0)
		sched.pollingNet.Store(0)
		if !list.empty() {
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps. The number of spinning Ms
	// is bounded at half the busy Ps so that CPU time burned on
	// fruitless stealing stays limited at low parallelism.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// New timer or GC work may exist; restart to discover it.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to run: if the GC wants an idle-priority mark
	// worker, run one.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// beforeIdle gives the platform a chance to hand back work before
	// we go idle (a no-op on most platforms; may ready event-handler
	// goroutines where there are no threads, e.g. wasm).
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before dropping the P, snapshot allp and the per-P masks: once we
	// no longer own a P these structures may grow under us, so the
	// re-checks below must use consistent copies.
	allpSnapshot := mp.snapshotAllp()
	// Snapshot masks by value; entry changes are tolerable, but the
	// lengths must stay fixed for safe reading without a P.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// Return P and block: re-check for global work or a pending safe
	// point under sched.lock before going idle.
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if !sched.runq.empty() {
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp == nil {
			throw("global runq empty with non-zero runqsize")
		}
		if runqputbatch(pp, &q); !q.empty() {
			throw("Couldn't put Gs into empty local runq")
		}
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// Work submission noticed there were no spinning Ms and asked
		// for one; become it rather than idling.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findRunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Delicate dance: we are transitioning from spinning to
	// non-spinning, racing with work submission elsewhere. Submitters
	// publish work first and check nmspinning second; we decrement
	// nmspinning first and re-check all work sources second. If the
	// order were reversed, work readied between our check and our
	// decrement could be lost with no spinning M left to find it.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findRunnable: negative nmspinning")
		}

		// Re-check all work sources. Note that we no longer hold a P,
		// so any work found requires re-acquiring an idle P first.

		// Global and per-P run queues again.
		lock(&sched.lock)
		if !sched.runq.empty() {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
				unlock(&sched.lock)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				if runqputbatch(pp, &q); !q.empty() {
					throw("Couldn't put Gs into empty local runq")
				}
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Idle-priority GC work again.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			// Run the idle worker.
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Finally, fold in any timers created or adjusted while we were
		// transitioning, so the netpoll sleep below wakes in time.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Poll network until the next timer. lastpoll.Swap(0) elects this M
	// as the single blocking poller.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findRunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findRunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// Under fake time, only poll without blocking.
			delay = 0
		}
		list, delta := netpoll(delay)
		// Refresh now: the poll may have blocked arbitrarily long.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Fake time and nothing ready; stop this M. When every M is
			// stopped, deadlock checking advances the fake clock.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			// No P available: inject the readied goroutines for other
			// Ms to pick up, and fall through to stopm below.
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Someone else is the blocking poller; make sure it wakes early
		// enough for our earliest timer.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3805
3806
3807
3808
3809
// pollWork reports whether there is non-background work this P could
// be doing: runnable goroutines (local or global) or ready network
// events. It may have false positives and negatives, so it is only an
// advisory check.
func pollWork() bool {
	if !sched.runq.empty() {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
3827
3828
3829
3830
3831
3832
3833
// stealWork attempts to steal a runnable goroutine or timer from any
// other P. It returns a goroutine to run (if any), updated now and
// pollUntil values, and newWork, which reports that more work may have
// appeared (so the caller should re-run its full search) — newWork can
// be true even when gp is nil.
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		// Only on the final round do we steal timers and runnext
		// entries, which are more intrusive and expensive to take.
		stealTimersOrRunNextG := i == stealTries-1

		// Visit Ps in a randomized order so victims are spread evenly.
		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC work may be available; let the caller re-check.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Run p2's due timers on its behalf (last round only:
			// checking another P's timers requires taking its timer
			// lock). timerpMask lets us skip Ps known to have no timers.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now, nil)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// Running a timer may have readied goroutines onto
					// our own run queue; check it before continuing so
					// we don't overlook our own work.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother stealing run queues from idle Ps.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// Found nothing to steal. If a timer ran, it may have readied work
	// somewhere we didn't look; report that via newWork.
	return nil, false, now, pollUntil, ranTimer
}
3902
3903
3904
3905
3906
3907
// checkRunqsNoP re-checks all Ps' run queues (using snapshots taken
// before this M released its P): if any non-idle P has work, it tries
// to take an idle P to run that work on, returning it (or nil).
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// No idle P available; some running M will eventually
				// pick up the work.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			return pp
		}
	}

	// No work available.
	return nil
}
3926
3927
3928
3929
// checkTimersNoP folds the earliest timer wake-up time across all Ps
// (from snapshots) into pollUntil, for an M without a P deciding how
// long it may block in netpoll.
func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
	for id, p2 := range allpSnapshot {
		if timerpMaskSnapshot.read(uint32(id)) {
			w := p2.timers.wakeTime()
			if w != 0 && (pollUntil == 0 || w < pollUntil) {
				pollUntil = w
			}
		}
	}

	return pollUntil
}
3942
3943
3944
3945
3946
// checkIdleGCNoP checks whether an idle-priority GC mark worker is
// wanted and, if so, acquires an idle P plus the worker goroutine to
// run on it. Returns (nil, nil) when there is no such work.
func checkIdleGCNoP() (*p, *g) {
	// We have no P, so gcBlackenEnabled may flip at any moment; it must
	// be re-checked after a P is acquired below.
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcShouldScheduleWorker(nil) {
		return nil, nil
	}

	// Acquire a P before committing to being an idle worker: this
	// ordering keeps the idle-worker count from exceeding the number of
	// Ps actually available to run one.
	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Now that we hold a P, re-check blackening and claim a worker slot.
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// No parked worker goroutine; undo everything.
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
4004
4005
4006
4007
// wakeNetPoller ensures some thread will notice a timer that must fire
// at time `when`: if a thread is blocked in the network poller, it is
// interrupted via netpollBreak; otherwise an M is woken to service
// timers.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() == 0 {
		// lastpoll == 0 indicates a poll is in progress. Only break
		// the poll if it would otherwise sleep past `when`; a
		// spurious wakeup is harmless, a missed one is not.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > when {
			netpollBreak()
		}
	} else {
		// No thread is in the poller; start one so new timers get
		// handled. (Not done on plan9 — see wakep/plan9 constraints.)
		if GOOS != "plan9" {
			wakep()
		}
	}
}
4026
// resetspinning clears the current M's spinning state and decrements
// the global spinning-M count. Because this M may have been the last
// spinning M, it calls wakep so newly submitted work is not missed.
func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		throw("findRunnable: negative nmspinning")
	}
	// M wakeup policy: possibly start a new spinning M to handle work
	// submitted while this one was transitioning out of spinning.
	wakep()
}
4042
4043
4044
4045
4046
4047
4048
4049
4050
// injectglist marks every goroutine on glist runnable and distributes
// them between the global run queue and the current P's local queue,
// starting idle Ms/Ps as needed. glist is emptied. Can be called with
// or without a current P.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}

	// Mark all the goroutines as runnable before we put them on the
	// run queues, remembering the tail so we can build a gQueue.
	var tail *g
	trace := traceAcquire()
	for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Turn the gList into a gQueue and empty the caller's list.
	q := gQueue{glist.head, tail.guintptr(), glist.size}
	*glist = gList{}

	// startIdle starts up to n Ms/Ps to run the queued goroutines.
	startIdle := func(n int32) {
		for ; n > 0; n-- {
			mp := acquirem()
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// No more idle Ps.
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No current P: everything goes on the global queue.
		n := q.size
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
		startIdle(n)
		return
	}

	// Push one goroutine per idle P onto the global queue so the idle
	// Ps we start have something to run immediately.
	var globq gQueue
	npidle := sched.npidle.Load()
	for ; npidle > 0 && !q.empty(); npidle-- {
		g := q.pop()
		globq.pushBack(g)
	}
	if !globq.empty() {
		n := globq.size
		lock(&sched.lock)
		globrunqputbatch(&globq)
		unlock(&sched.lock)
		startIdle(n)
	}

	// Remainder goes on the local queue; overflow back to the global.
	if runqputbatch(pp, &q); !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
	}

	// Wake another P if there may be more work than this one can run.
	wakep()
}
4138
4139
4140
// schedule is one round of the scheduler: it finds a runnable
// goroutine and executes it on the current M. It never returns.
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	if mp.lockedg != 0 {
		// This M is dedicated to a locked g; wait for it and run it.
		stoplockedm()
		execute(mp.lockedg.ptr(), false)
	}

	// We should not schedule away from a g that is executing a cgo call,
	// since the cgo call is using the m's g0 stack.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// Sanity check: a spinning M must have an empty local run queue,
	// otherwise it could have found work without spinning.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available

	// findRunnable may have switched Ps; reload.
	pp = mp.p.ptr()

	// We're about to run this g, so drop any cached allp snapshot.
	mp.clearAllpSnapshot()

	// Release any GC mark worker reservation this P held but didn't use.
	gcController.releaseNextGCMarkWorker(pp)

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// The world is being frozen (crash in progress) and freezing
		// is disabled from stopping us; deadlock here instead of
		// running more user code. lock(&deadlock) twice never returns.
		lock(&deadlock)
		lock(&deadlock)
	}

	// This thread is going to run a goroutine and is not spinning
	// anymore, so unmark it (possibly starting a new spinning M).
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// User goroutine scheduling is disabled; park this g on the
		// disabled list and look for other work. Re-check under the
		// lock in case scheduling was re-enabled meanwhile.
		lock(&sched.lock)
		if schedEnabled(gp) {
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			unlock(&sched.lock)
			goto top
		}
	}

	// If about to schedule a not-normal goroutine (GC worker, trace
	// reader), wake a P to run any user goroutines we're displacing.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// Hands off own p to the locked m, then blocks waiting for a
		// new p before looking for more work.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}
4238
4239
4240
4241
4242
4243
4244
4245
// dropg removes the association between the current M and its current
// goroutine (m.curg), clearing both back-links. Uses the no-write-
// barrier setters because this runs in scheduler contexts where write
// barriers are not allowed.
func dropg() {
	gp := getg()

	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
4252
// parkunlock_c is a gopark unlock callback that releases the mutex
// passed via waitlock. Always allows the park to proceed (returns true).
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
4257
4258
// park_m continues a gopark on the g0 stack: it transitions gp from
// _Grunning to _Gwaiting, drops it from this M, runs the waitunlockf
// callback (which may veto the park), and schedules. Never returns to
// the caller; gp resumes via goready/execute.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	// If gp is in a synctest bubble, mark the bubble active while we
	// park, so the bubble doesn't appear idle mid-transition.
	bubble := gp.bubble
	if bubble != nil {
		bubble.incActive()
	}

	if trace.ok() {
		// Emit the park event before the status change so tracers see
		// a consistent ordering.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}

	// N.B. Not using casGToWaiting here because the wait reason is set by gopark.
	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The callback vetoed the park: make gp runnable again
			// and resume it immediately on this M.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if bubble != nil {
				bubble.decActive()
			}
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back, never returns.
		}
	}

	if bubble != nil {
		bubble.decActive()
	}

	schedule()
}
4312
// goschedImpl yields the processor: gp goes from _Grunning to
// _Grunnable and is requeued, then the M schedules other work.
// preempted distinguishes involuntary preemption from a voluntary
// Gosched, which affects tracing and queue placement.
func goschedImpl(gp *g, preempted bool) {
	pp := gp.m.p.ptr()
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Trace the event before the status change so the tracer sees
		// it while gp still counts as running.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()
	if preempted && sched.gcwaiting.Load() {
		// We're preempted for a stop-the-world; keep gp local (as
		// runnext) so it resumes quickly on this P after the STW.
		runqput(pp, gp, true)
	} else {
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)
	}

	if mainStarted {
		wakep()
	}

	schedule()
}
4353
4354
// gosched_m is the g0 continuation of Gosched: a voluntary yield.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
4358
4359
// goschedguarded_m is like gosched_m but refuses to yield if the M is
// in a non-preemptible state, resuming gp immediately instead.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
4366
// gopreempt_m is the g0 continuation of a synchronous preemption.
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
4370
4371
4372
4373
// preemptPark parks gp (which is being asynchronously preempted) in
// _Gpreempted state and enters the scheduler. gp is later made
// runnable again by whoever requested the preemption.
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not leave gp at an
		// unsafe point where the frame's SP has been rewritten.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition to _Gscan|_Gpreempted first: the scan bit keeps the
	// GC (and the preempter) from acting on gp until we have fully
	// detached it from this M below.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)

	// Acquire the trace locker before dropping gp so the GoPark event
	// is ordered with the status transition.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}

	// Detach gp from this M while the scan bit still protects it.
	dropg()

	// Publish the final _Gpreempted state; from here the preempter may
	// take ownership of gp.
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}

	schedule()
}
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
// goyield is like Gosched, but it puts the current goroutine on the
// local run queue (via goyield_m) instead of the global one.
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
4461
// goyield_m is the g0 continuation of goyield: requeue gp at the back
// of this P's local run queue and schedule.
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Trace before the status change, while gp still counts as
		// running.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false)
	schedule()
}
4479
4480
// goexit1 finishes execution of the current goroutine: it emits race
// and trace end events, then switches to g0 to destroy the g.
func goexit1() {
	if raceenabled {
		// Synchronize bubble-scoped happens-before edges before the
		// goroutine-end race event.
		if gp := getg(); gp.bubble != nil {
			racereleasemergeg(gp, gp.bubble.raceaddr())
		}
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
4495
4496
// goexit0 runs on g0 to tear down an exiting goroutine and then
// schedule other work.
func goexit0(gp *g) {
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// The goroutine handled secrets: scrub its entire stack so no
		// secret material survives in the freed stack memory.
		memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	}
	gdestroy(gp)
	schedule()
}
4508
// gdestroy transitions gp to _Gdead, clears its scheduling state, and
// returns it to the free pool. Runs on g0. If gp was locked to this
// thread and lockedExt is still set, the thread is abandoned (the M
// jumps back to mstart via g0.sched) rather than reused.
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	// Clear all per-goroutine state so the g can be reused.
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil
	gp.bubble = nil
	gp.fipsOnlyBypass = false
	gp.secret = 0

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool so it isn't lost
		// when the g is recycled.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" { // no threads yet on wasm
		gfput(pp, gp)
		return
	}

	if locked && mp.lockedInt != 0 {
		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
		if mp.isextra {
			throw("runtime.Goexit called in a thread that was not created by the Go runtime")
		}
		throw("exited a goroutine internally locked to the OS thread")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine may have locked this thread because it put it
		// in an unusual kernel state. Kill it rather than returning it
		// to the thread pool.
		if GOOS != "plan9" { // See golang.org/issue/22227.
			gogo(&mp.g0.sched)
		} else {
			// Clear lockedExt on plan9 in case of temporary restart.
			mp.lockedExt = 0
		}
	}
}
4576
4577
4578
4579
4580
4581
4582
4583
4584
// save records pc, sp, and bp into getg().sched so that a later gogo
// resumes execution there. It must not be called for g0 or gsignal
// (their sched is used differently), and it deliberately leaves
// sched.ctxt untouched — a non-nil ctxt here is a bug (it would need
// a write barrier, which this context can't have).
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special and must describe the content of the
		// stack when the goroutine is not running; saving over it
		// here would corrupt that.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.bp = bp
	// We need to ensure ctxt is zero, but can't have a write barrier
	// here. However, it should always already be zero. Assert that.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
// reentersyscall is the common entry to a system call: it records the
// caller's pc/sp/bp, moves the goroutine to _Gsyscall, and notifies
// the tracer, GC, and sysmon as needed. The g keeps its P (fast path);
// the P can be stolen while the syscall blocks.
//
// Nothing here may split the stack: g.sched must stay valid so other
// parts of the runtime can observe/resume this g. throwsplit enforces
// that. Note the repeated save(pc, sp, bp) calls: each systemstack
// call below clobbers g.sched, so it must be re-saved afterwards.
func reentersyscall(pc, sp, bp uintptr) {
	gp := getg()

	// Disable preemption while this g is in _Gsyscall with a P.
	gp.m.locks++

	if goexperiment.RuntimeSecret {
		// Scrub secrets from the signal stack before entering the
		// kernel.
		eraseSecretsSignalStk()
	}

	// Entersyscall must not call any function that might split/grow
	// the stack; set stackPreempt + throwsplit to catch violations.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Leave SP around for GC and traceback.
	gp.m.syscalltick = gp.m.p.ptr().syscalltick

	pp := gp.m.p.ptr()
	if pp.runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack.
		systemstack(runSafePointFn)
	}
	gp.m.oldp.set(pp)

	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp

	// Sanity-check that the saved sp/bp lie within this g's stack.
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	trace := traceAcquire()
	if trace.ok() {
		// Emit the syscall event from the system stack; then re-save
		// because systemstack clobbered g.sched.
		systemstack(func() {
			trace.GoSysCall()
		})
		save(pc, sp, bp)
	}
	if sched.gcwaiting.Load() {
		// A stop-the-world is pending: hand off this P to the GC.
		systemstack(func() {
			entersyscallHandleGCWait(trace)
		})
		// systemstack clobbered g.sched; re-save.
		save(pc, sp, bp)
	}

	// Fast-path status change; fall back to casgstatus when in a
	// synctest bubble or when the CAS loses a race (e.g. with a scan).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
		casgstatus(gp, _Grunning, _Gsyscall)
	}
	if staticLockRanking {
		// casgstatus may call systemstack under static lock ranking;
		// re-save to be safe.
		save(pc, sp, bp)
	}
	if trace.ok() {
		traceRelease(trace)
	}
	if sched.sysmonwait.Load() {
		systemstack(entersyscallWakeSysmon)
		// systemstack clobbered g.sched; re-save.
		save(pc, sp, bp)
	}
	gp.m.locks--
}
4747
4748
4749
4750
// debugExtendGrunningNoP, when true, artificially widens the window in
// which a goroutine is _Grunning around syscall entry/exit without a P
// (see entersyscallblock and exitsyscall) to shake out latent races.
// Always false in normal builds.
const debugExtendGrunningNoP = false
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
// entersyscall is the standard syscall entry point used by the syscall
// package and cgo. It captures the caller's pc/sp/frame pointer and
// delegates to reentersyscall; it must not split the stack between
// capturing those values and passing them on.
func entersyscall() {
	// N.B. getcallerfp cannot be written directly as an argument to
	// reentersyscall because it forces spilling the other arguments to
	// the stack, which invalidates the sp/pc just captured.
	fp := getcallerfp()
	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
}
4775
// entersyscallWakeSysmon wakes sysmon if it is sleeping, so it can
// observe the syscall in progress. Re-checks sysmonwait under
// sched.lock to avoid a lost wakeup.
func entersyscallWakeSysmon() {
	lock(&sched.lock)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
4784
// entersyscallHandleGCWait releases this M's P to a pending
// stop-the-world: the P is detached from the M, moved to _Pgcstop,
// and the stopper is notified when it was the last P outstanding.
// Called from reentersyscall on the system stack.
func entersyscallHandleGCWait(trace traceLocker) {
	gp := getg()

	lock(&sched.lock)
	if sched.stopwait > 0 {
		// Detach the P from this M and mark it stopped for the GC.
		pp := gp.m.p.ptr()
		pp.m = 0
		gp.m.p = 0
		atomic.Store(&pp.status, _Pgcstop)

		if trace.ok() {
			trace.ProcStop(pp)
		}
		// This M now sits in a syscall with no P; account for it.
		addGSyscallNoP(gp.m)
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			// Last P to stop: wake the goroutine waiting in stopTheWorld.
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
// entersyscallblock is the syscall entry used when the call is known
// to block for a long time: unlike entersyscall, it hands the P off
// immediately rather than letting sysmon retake it later.
// Like reentersyscall, it must not split the stack while g.sched is
// live, hence throwsplit and the final re-save.
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // see comment in entersyscall
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++

	// We'll be in a syscall with no P; account for it up front.
	addGSyscallNoP(gp.m)

	// Leave SP around for GC and traceback.
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	// Hand the P off right away while still _Grunning; trace event and
	// handoff happen together on the system stack.
	trace := traceAcquire()
	systemstack(func() {
		if trace.ok() {
			trace.GoSysCall()
		}
		handoffp(releasep())
	})

	// Optionally widen the _Grunning-without-P window for race testing.
	if debugExtendGrunningNoP {
		usleep(10)
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if trace.ok() {
		systemstack(func() {
			traceRelease(trace)
		})
	}

	// The systemstack calls above clobbered g.sched; re-save.
	save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())

	gp.m.locks--
}
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
// exitsyscall is the syscall exit path: it moves the goroutine from
// _Gsyscall back to _Grunning and tries to reacquire a P. Fast path:
// the M still has its P (or can retake its old one) and execution
// continues inline. Slow path: no P is available, so the g is handed
// to the scheduler via exitsyscallNoP and this call resumes only when
// the g is next scheduled.
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	if sys.GetCallerSP() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}
	gp.waitsince = 0

	if sched.stopwait == freezeStopWait {
		// The world is frozen (crash in progress); don't run any more
		// user code. Double-lock on deadlock never returns.
		systemstack(func() {
			lock(&deadlock)
			lock(&deadlock)
		})
	}

	// Transition back to running first; P acquisition happens while
	// already _Grunning. Fast CAS with casgstatus fallback (synctest
	// bubbles and CAS races need the full path).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
		casgstatus(gp, _Gsyscall, _Grunning)
	}

	// Optionally widen the _Grunning-without-P window for race testing.
	if debugExtendGrunningNoP {
		usleep(10)
	}

	// Consume the P saved at syscall entry.
	oldp := gp.m.oldp.ptr()
	gp.m.oldp.set(nil)

	pp := gp.m.p.ptr()
	if pp != nil {
		// Fast path: we kept our P through the syscall.
		if trace := traceAcquire(); trace.ok() {
			systemstack(func() {
				// If syscalltick changed, the P was stolen and
				// returned during the syscall; the trace needs the
				// full steal/start/exit/start sequence to stay
				// consistent.
				if pp.syscalltick == gp.m.syscalltick {
					trace.GoSysExit(false)
				} else {
					trace.ProcSteal(pp)
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
				}
				traceRelease(trace)
			})
		}
	} else {
		// No P attached; try to retake oldp or grab an idle P.
		systemstack(func() {
			if pp := exitsyscallTryGetP(oldp); pp != nil {
				// Attach without tracing, then emit the combined
				// start/exit events.
				acquirepNoTrace(pp)

				if trace := traceAcquire(); trace.ok() {
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
					traceRelease(trace)
				}
			}
		})
		pp = gp.m.p.ptr()
	}

	if pp != nil {
		if goroutineProfile.active {
			// Make sure the in-flight goroutine profile captures this
			// g before it starts running again.
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}

		// Got a P: note the syscall completed and resume inline.
		pp.syscalltick++

		// Garbage collector isn't running (since we are), so okay to
		// clear syscallsp.
		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// Restore the preemption request in case we cleared it in newstack.
			gp.stackguard0 = stackPreempt
		} else {
			// Otherwise restore the real stack guard.
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// User scheduling disabled: yield so the scheduler can
			// park this g.
			Gosched()
		}
		return
	}

	gp.m.locks--

	// Slow path: no P; hand the g to the scheduler. Resumes here when
	// rescheduled.
	mcall(exitsyscallNoP)

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for the GC during
	// the syscall (the GC ran while we were blocked above).
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
5056
5057
5058
5059
5060
5061
5062
// exitsyscallTryGetP attempts to find a P for an M leaving a syscall:
// first by reclaiming the M's old P from whatever thread is holding
// it, then by taking any idle P. Returns nil if neither works.
// Adjusts the syscall-without-P count on success.
func exitsyscallTryGetP(oldp *p) *p {
	// Try to retake the P this M had before the syscall.
	if oldp != nil {
		if thread, ok := setBlockOnExitSyscall(oldp); ok {
			// Stop the holder, take the P back, then let it resume.
			thread.takeP()
			decGSyscallNoP(getg().m)
			thread.resume()
			return oldp
		}
	}

	// Otherwise try to grab any idle P, waking sysmon if it sleeps.
	if sched.pidle != 0 {
		lock(&sched.lock)
		pp, _ := pidleget(0)
		if pp != nil && sched.sysmonwait.Load() {
			sched.sysmonwait.Store(false)
			notewakeup(&sched.sysmonnote)
		}
		unlock(&sched.lock)
		if pp != nil {
			decGSyscallNoP(getg().m)
			return pp
		}
	}
	return nil
}
5090
5091
5092
5093
5094
5095
5096
// exitsyscallNoP is the slow path of exitsyscall, run on g0 via mcall:
// the M has no P, so gp is made runnable and either run on a freshly
// acquired idle P, handed back to its locked M, or queued globally
// while this M parks. Never returns to its caller directly; gp resumes
// via execute.
func exitsyscallNoP(gp *g) {
	traceExitingSyscall()
	trace := traceAcquire()
	casgstatus(gp, _Grunning, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// Emit the exit event after the status transition so the
		// tracer sees a consistent state.
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	// This M is no longer in a syscall without a P.
	decGSyscallNoP(getg().m)
	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		// No P to run gp here; queue it globally. Check lockedm only
		// in this branch: once gp is on the global queue another M
		// may run (and mutate) it, so it must not be touched after
		// unlock below.
		globrunqput(gp)

		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// Wait until another thread schedules gp and so m again.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
5147
5148
5149
5150
5151
5152
5153
// addGSyscallNoP notes that mp has entered a syscall without holding a
// P, incrementing the global count used by the scheduler. Extra Ms
// created for C threads (isExtraInC) are excluded from the count.
func addGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(1)
	}
}
5167
5168
5169
5170
5171
5172
5173
// decGSyscallNoP undoes addGSyscallNoP when mp leaves its P-less
// syscall state. Extra Ms for C threads are excluded, mirroring
// addGSyscallNoP.
func decGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(-1)
	}
}
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
// syscall_runtime_BeforeFork prepares the runtime for fork(2): it
// disables preemption, blocks signals (saving the old mask for
// AfterFork), and sets the stack guard to stackFork so a stack split
// in the forked child is caught. Linked to syscall.runtime_BeforeFork.
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block signals during a fork, so that the child does not run
	// a signal handler before exec if a signal is sent to the process
	// group. See issue #18600.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even try
	// to grow stack. Set a sentinel guard so any stack growth between
	// here and AfterFork crashes loudly instead of corrupting state.
	gp.stackguard0 = stackFork
}
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
// syscall_runtime_AfterFork undoes BeforeFork in the parent process:
// restores the normal stack guard and signal mask, and re-enables
// preemption. Linked to syscall.runtime_AfterFork.
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// Restore the real stack guard that BeforeFork replaced.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
5234
5235
5236
// inForkedChild is true while we are in the child process after a fork
// and before exec; consulted by signal handling code (set/cleared in
// syscall_runtime_AfterForkInChild).
var inForkedChild bool
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
// syscall_runtime_AfterForkInChild runs in the forked child before
// exec: it resets signal handlers inherited from the parent and
// restores the signal mask saved by BeforeFork. inForkedChild brackets
// the signal-handler reset so concurrent signal code can detect this
// state.
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here
	// because we are the only thread in the child process.
	inForkedChild = true

	clearSignalHandlers()

	// When we are the child we are the only thread running,
	// so we know that nothing else has changed gp.m.sigmask.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
5273
5274
5275
5276
// pendingPreemptSignals counts preemption signals that have been sent
// but not yet received; BeforeExec waits for it to drain on
// darwin/ios before exec.
var pendingPreemptSignals atomic.Int32
5278
5279
5280
5281
// syscall_runtime_BeforeExec prepares for exec(2): it takes execLock
// to stop new threads being created mid-exec, and on darwin/ios waits
// for outstanding preemption signals to be delivered first. Linked to
// syscall.runtime_BeforeExec.
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to
	// be received. See issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
5294
5295
5296
5297
// syscall_runtime_AfterExec releases execLock after a failed exec.
// Linked to syscall.runtime_AfterExec.
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
5301
5302
// malg allocates a new g with a stack big enough for stacksize bytes
// (plus the system-reserved stackSystem, rounded to a power of two).
// A negative stacksize yields a g with no stack (used for gs whose
// stacks are provided externally).
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
			if valgrindenabled {
				newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
			}
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		newg.stackguard1 = ^uintptr(0)
		// Clear the bottom word of the stack. We record g
		// there on gsignal stack during VDSO on ARM and ARM64.
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
5321
5322
5323
5324
// newproc creates a new goroutine running fn and places it on the
// current P's run queue (as runnext). This is what a `go` statement
// compiles to. The heavy lifting happens in newproc1 on the system
// stack.
func newproc(fn *funcval) {
	gp := getg()
	pc := sys.GetCallerPC()
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		pp := getg().m.p.ptr()
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
5339
5340
5341
5342
// newproc1 allocates (or recycles) a g and initializes it to run fn,
// returning it in _Grunnable state — or _Gwaiting with waitreason if
// parked is true. The caller is responsible for queueing the g.
// callergp/callerpc identify the creator for goid ancestry, profiling
// labels, and tracing. Must run on the system stack.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption because we hold M and P in local vars.
	pp := mp.p.ptr()
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		// Publish to allgs while still _Gdead so the GC won't scan the
		// uninitialized stack.
		allgadd(newg)
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	// Carve out the initial frame at the top of the new stack.
	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// caller's FP
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	// Set up sched so the g "returns" into goexit after fn finishes;
	// gostartcallfn arranges the fake call frame for fn.
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	newg.runningCleanups.Store(false)
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// Only user goroutines inherit synctest bubbles and pprof labels.
		newg.bubble = callergp.bubble
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. It should
			// include exactly the set of goroutines that were alive
			// when the goroutine profile started. We may be creating
			// this g after the profile's snapshot, so mark it as
			// already satisfied.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}

	// Track initial transition? (sampled 1-in-gTrackingPeriod)
	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Get a goid and switch to runnable. Make all this atomic to the
	// tracer.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	if pp.goidcache == pp.goidcacheend {
		// Sched.goidgen is the last allocated id; this batch must be
		// [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	casgstatus(newg, _Gdead, status)
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// Inherit the caller's FIPS-only bypass flag.
	newg.fipsOnlyBypass = callergp.fipsOnlyBypass

	// Inherit the caller's DIT (data-independent timing) request.
	newg.ditWanted = callergp.ditWanted

	// Set up race context.
	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// See note in proflabel.go on labelSync's role in
			// synchronizing with the reads in the signal handler.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	pp.goroutinesCreated++
	releasem(mp)

	return newg
}
5458
5459
5460
5461
// saveAncestors builds the ancestry chain for a new goroutine when
// GODEBUG=tracebackancestors is set: the caller's current stack plus
// up to debug.tracebackancestors of the caller's own ancestors.
// Returns nil when the feature is off or the caller has no goid
// (e.g. the bootstrap g).
func saveAncestors(callergp *g) *[]ancestorInfo {
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}
	var callerAncestors []ancestorInfo
	if callergp.ancestors != nil {
		callerAncestors = *callergp.ancestors
	}
	// Cap the ancestry depth; slot 0 is reserved for the caller itself.
	n := int32(len(callerAncestors)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}
	ancestors := make([]ancestorInfo, n)
	copy(ancestors[1:], callerAncestors)

	// Record the caller's current call stack as the newest ancestor.
	var pcs [tracebackInnerFrames]uintptr
	npcs := gcallers(callergp, 0, pcs[:])
	ipcs := make([]uintptr, npcs)
	copy(ipcs, pcs[:])
	ancestors[0] = ancestorInfo{
		pcs:  ipcs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	ancestorsp := new([]ancestorInfo)
	*ancestorsp = ancestors
	return ancestorsp
}
5492
5493
5494
// gfput returns a dead g to pp's local free-g cache, freeing
// non-standard-size stacks first. When the local cache grows past 64,
// half is transferred to the global cache (sched.gFree) to bound
// per-P hoarding.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// Non-standard stack size — free it so the recycled g starts
		// from a fresh standard stack.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		if valgrindenabled {
			valgrindDeregisterStack(gp.valgrindStackID)
			gp.valgrindStackID = 0
		}
	}

	pp.gFree.push(gp)
	if pp.gFree.size >= 64 {
		// Spill half the local cache to the global lists, split by
		// whether each g still owns a stack.
		var (
			stackQ   gQueue
			noStackQ gQueue
		)
		for pp.gFree.size >= 32 {
			gp := pp.gFree.pop()
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		unlock(&sched.gFree.lock)
	}
}
5534
5535
5536
// gfget takes a dead g from pp's local free-g cache, refilling the
// cache from the global lists when empty. Returns nil when no free g
// exists anywhere. The returned g always has a standard-size stack
// (allocating one if needed).
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of up to 32 free gs to the local list,
		// preferring ones that still have stacks.
		for pp.gFree.size < 32 {
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			pp.gFree.push(gp)
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Deallocate old stack. We kept it in gfput because it was the
		// right size when the goroutine was put on the free list, but
		// the right size has changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
			if valgrindenabled {
				valgrindDeregisterStack(gp.valgrindStackID)
				gp.valgrindStackID = 0
			}
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or above. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
			if valgrindenabled {
				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
			}
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// Reusing an existing stack: tell the sanitizers it is fresh.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
5597
5598
// gfpurge moves all of pp's cached free gs to the global free lists,
// split by whether each g still owns a stack. Used when pp is being
// destroyed.
func gfpurge(pp *p) {
	var (
		stackQ   gQueue
		noStackQ gQueue
	)
	for !pp.gFree.empty() {
		gp := pp.gFree.pop()
		if gp.stack.lo == 0 {
			noStackQ.push(gp)
		} else {
			stackQ.push(gp)
		}
	}
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(noStackQ)
	sched.gFree.stack.pushAll(stackQ)
	unlock(&sched.gFree.lock)
}
5617
5618
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
5622
5623
5624
5625
5626
5627
// dolockOSThread wires the current goroutine and its M to each other
// after the lockedExt/lockedInt counters have been updated by the
// caller. No-op on wasm, which has no threads.
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
// LockOSThread wires the calling goroutine to its current operating
// system thread (external/user variant, counted in lockedExt; calls
// nest and must be balanced by UnlockOSThread). It also ensures the
// template thread exists so new threads can be created on demand.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from the locked
		// thread, we need the template thread. Start it now
		// while we're in a known-good state.
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// Counter wrapped: too many nested LockOSThread calls.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
5668
5669
// lockOSThread is the runtime-internal variant of LockOSThread,
// counted separately in lockedInt.
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
5674
5675
5676
5677
5678
5679
// dounlockOSThread severs the g↔M wiring, but only once both the
// external (lockedExt) and internal (lockedInt) lock counts have
// dropped to zero. No-op on wasm.
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
// UnlockOSThread undoes one prior LockOSThread call. Extra calls (with
// lockedExt already zero) are ignored.
func UnlockOSThread() {
	gp := getg()
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
5714
5715
// unlockOSThread is the runtime-internal counterpart of lockOSThread.
// Unlike the public variant, an unbalanced call is an internal error.
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
5724
// badunlockosthread reports an unbalanced internal unlockOSThread.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
5728
// gcount returns the number of live goroutines (allgs minus the free
// lists), optionally excluding system goroutines. The inputs can all
// change concurrently, so the result is a best-effort snapshot,
// clamped to at least 1 (the caller itself is running).
func gcount(includeSys bool) int32 {
	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
	if !includeSys {
		n -= sched.ngsys.Load()
	}
	// Subtract gs parked in each P's local free cache.
	for _, pp := range allp {
		n -= pp.gFree.size
	}

	// All these variables can be changed concurrently, so the result
	// can be inconsistent. But at least the current goroutine is
	// running.
	if n < 1 {
		n = 1
	}
	return n
}
5745
5746
5747
5748
5749
// goroutineleakcount returns the number of goroutines detected as
// leaked by the goroutine-leak checker (work.goroutineLeak.count).
func goroutineleakcount() int {
	return work.goroutineLeak.count
}
5753
// mcount returns the number of Ms: ever created minus freed.
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
5757
// prof holds CPU profiler state shared with the SIGPROF handler.
var prof struct {
	// signalLock serializes updates to hz against the signal handler
	// (spin lock, CAS 0<->1; see setcpuprofilerate/sigprof).
	signalLock atomic.Uint32

	// hz is the current profiling rate in samples per second;
	// 0 means profiling is off.
	hz atomic.Int32
}
5765
// These self-recursive dummies exist only so their PCs can be used as
// sentinel frames in CPU profiles (see sigprof), labeling samples that
// can't be attributed to real Go code.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
5773
5774
5775
5776
5777
// sigprof is called on SIGPROF to record a CPU profile sample: it
// unwinds the interrupted goroutine's stack (with special handling for
// cgo, libcall, and VDSO contexts), substitutes sentinel PCs when no
// frames can be recovered, and feeds the sample to cpuprof and the
// execution tracer. It runs in signal-handler context.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, then profiling is not enabled for this
	// thread. We must check this to avoid a deadlock between setcpuprofilerate
	// and the call to cpuprof.add, below.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On mips{,le}/arm, 64-bit atomics are emulated with spinlocks; a
	// SIGPROF landing inside one could deadlock if we tried to unwind.
	// Drop (and count) such samples instead.
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic functions call into the kernel
			// helper at a 0xffff0... address on older ARM; treat as above.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC. Use mallocing as a
	// reentrancy/"in runtime" marker while we traceback.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure it is safe to read
		// cgoCallers (i.e. the thread is not writing it concurrently).
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			// Copy the C frames first, then mark the buffer consumed.
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall, i.e. runtime syscall on windows.
		// Collect Go stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call, e.g. nanotime on Linux. Collect Go stack that
		// leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed. Synthesize a
		// two-frame sample from sentinel PCs instead.
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than "etext".
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// Note: it can happen on Windows that we interrupted a system thread
		// with no g, so gp could nil here.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		// Attribute the tracer sample to the user goroutine (curg)
		// rather than a system g, when one exists.
		gprof := gp
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
5895
5896
5897
// setcpuprofilerate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, profiling is turned off.
func setcpuprofilerate(hz int32) {
	// Clamp to 0; negative rates mean "off".
	if hz < 0 {
		hz = 0
	}

	// Disable preemption: we are about to manipulate per-thread and
	// process-wide profiling state and must not migrate Ms mid-way.
	gp := getg()
	gp.m.locks++

	// Stop profiling this thread first so that no SIGPROF arrives
	// while prof state is in flux.
	setThreadCPUProfiler(0)

	// prof.signalLock is a hand-rolled spin lock (CAS 0->1) because
	// this state is also touched from the signal handler's world.
	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz {
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	// Publish the rate so newly created Ms pick it up.
	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	// Re-enable per-thread profiling at the new rate.
	if hz != 0 {
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
5933
5934
5935
// init initializes pp with the given P id. It may reuse a previously
// destroyed P's storage (procresize keeps dead Ps in allp).
func (pp *p) init(id int32) {
	pp.id = id
	pp.gcw.id = id
	// New Ps start in _Pgcstop; procresize transitions them out.
	pp.status = _Pgcstop
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			// P 0 adopts the bootstrap mcache created before any
			// P existed.
			if mcache0 == nil {
				throw("missing mcache?")
			}
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			// P 0 adopts the bootstrap race proc context.
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// Conservatively mark this P as possibly having timers; cleared
	// lazily when found empty.
	timerpMask.set(id)

	// This P is not idle (yet).
	idlepMask.clear(id)
}
5972
5973
5974
5975
5976
// destroy releases all of pp's resources and transitions it to _Pdead.
// sched.lock must be held and the world must be stopped; pp must not be
// in use by any M.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move remaining runnable goroutines to the global queue,
	// preserving FIFO order by draining the local queue from the tail
	// onto the head of the global queue.
	for pp.runqhead != pp.runqtail {
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move this P's timers to the current M's P.
	getg().m.p.ptr().timers.take(&pp.timers)

	// Destroying a P mid-GC would lose its GC work buffers.
	if phase := gcphase; phase != _GCoff {
		println("runtime: p id", pp.id, "destroyed during GC phase", phase)
		throw("P destroyed while GC is running")
	}

	pp.gcw.spanq.destroy()

	// Drop per-P caches so their contents can be collected.
	clear(pp.sudogbuf[:])
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	clear(pp.deferpoolbuf[:])
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		// Return cached mspans to the heap and flush the page cache;
		// mheap_.lock requires the system stack.
		for i := 0; i < pp.mspancache.len; i++ {
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil
	// Return this P's free-G list to the global pool.
	gfpurge(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector's context-end call attributes work to
			// the current P, so temporarily point this M at pp while
			// ending its timer race context.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0
	// Fold per-P statistics into their global accumulators.
	gcCleanups.queued += pp.cleanupsQueued
	pp.cleanupsQueued = 0
	sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
	pp.goroutinesCreated = 0
	pp.xRegs.free()
	pp.status = _Pdead
}
6051
6052
6053
6054
6055
6056
6057
6058
6059
// procresize changes the number of Ps to nprocs and returns the list of
// Ps with runnable work (linked through p.link) that the caller must
// start. sched.lock must be held and the world must be stopped.
// The current M keeps (or acquires) a P; all other Ps end up idle,
// runnable (returned), or destroyed.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// Accumulate P-seconds for scheduler statistics.
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// Grow allp and the per-P bitmasks if needed. allpLock guards
	// concurrent readers that iterate allp without sched.lock.
	if nprocs > int32(len(allp)) {
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to cap so retained dead Ps are
			// preserved for reuse.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Initialize any newly added Ps (reusing retained dead Ps).
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		// Publish with a store-release so lock-free readers see a
		// fully initialized P.
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// Our current P survives the resize; keep running on it.
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// Our P is being removed (or we have none, e.g. at bootstrap).
		// Release it and take allp[0] instead.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// For the tracer this looks like the goroutine was
				// rescheduled onto a different P.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// The bootstrap mcache has been adopted by P 0; drop the global
	// reference.
	mcache0 = nil

	// Tear down Ps beyond the new count. Their storage stays in allp
	// (up to cap) for possible reuse.
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
	}

	// Shrink allp and the masks if we grew them above or are
	// shrinking overall.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Partition the remaining Ps (other than ours): those with local
	// work become runnable; the rest go idle. Where possible, pair a
	// runnable P with the M it last ran on for locality.
	var runnablePs *p
	var runnablePsNeedM *p
	var idlePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			continue // that's us
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pp.link.set(idlePs)
			idlePs = pp
			continue
		}
		// Prefer the P's previous M if it is sitting idle.
		var mp *m
		if oldm := pp.oldm.get(); oldm != nil {
			mp = mgetSpecific(oldm)
		}
		if mp == nil {
			// No preferred M available right now; assign one in the
			// second pass below.
			pp.link.set(runnablePsNeedM)
			runnablePsNeedM = pp
			continue
		}
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// Second pass: hand any idle M (possibly nil) to the runnable Ps
	// that didn't get their preferred M.
	for runnablePsNeedM != nil {
		pp := runnablePsNeedM
		runnablePsNeedM = pp.link.ptr()

		mp := mget()
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// If GC marking is active, try to hand idle Ps to waiting GC
	// workers; each such P becomes runnable too.
	if gcBlackenEnabled != 0 {
		for idlePs != nil {
			pp := idlePs
			ok, _ := gcController.assignWaitingGCWorker(pp, now)
			if !ok {
				// No more waiting workers.
				break
			}
			idlePs = pp.link.ptr()
			mp := mget()
			pp.m.set(mp)
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}

	// Whatever is left goes onto the idle P list.
	for idlePs != nil {
		pp := idlePs
		idlePs = pp.link.ptr()
		pidleput(pp, now)
	}

	stealOrder.reset(uint32(nprocs))
	// gomaxprocs is read atomically elsewhere; publish via atomic store.
	var int32p *int32 = &gomaxprocs
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the GC CPU limiter that capacity changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
6282
6283
6284
6285
6286
6287
6288
// acquirep associates pp with the current M and emits a ProcStart
// trace event. The wiring must happen before the event so the tracer
// sees a consistent P state.
func acquirep(pp *p) {
	// Do the actual work first...
	acquirepNoTrace(pp)

	// ...then record it for the execution tracer.
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
6300
6301
6302
6303
// acquirepNoTrace is acquirep without the trace event, for callers that
// emit their own tracing.
func acquirepNoTrace(pp *p) {
	// Wire pp to the current M (checks state, sets p<->m links,
	// transitions pp to _Prunning).
	wirep(pp)

	// Remember which M this P ran on, so procresize can reunite them
	// for cache locality.
	pp.oldm = pp.m.ptr().self

	// Make sure this P's mcache is ready for the current sweep
	// generation before any allocation happens on it.
	pp.mcache.prepareForSweep()
}
6319
6320
6321
6322
6323
6324
6325
// wirep is the low-level part of acquirep: it links pp to the current M
// and marks it _Prunning. The current M must not already have a P, and
// pp must be idle and unowned.
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// throw must run on the system stack here because the
		// current goroutine's state is inconsistent for unwinding.
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		// Same: report on the system stack.
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
6352
6353
// releasep dissociates the current M from its P, emitting a ProcStop
// trace event first (while the P is still consistently owned), and
// returns the released P.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
6362
6363
// releasepNoTrace is releasep without the trace event.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}

	// Give back any GC mark worker reserved for this P.
	gcController.releaseNextGCMarkWorker(pp)

	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
6384
// incidlelocked adjusts the count of idle locked Ms by v (+1 or -1).
// Incrementing may reveal a deadlock, so it re-runs checkdead.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
6393
6394
6395
6396
// checkdead reports deadlock: when all goroutines are blocked and no M
// can make progress. It must be called with sched.lock held; it may
// unlock sched.lock before throwing/fatal-ing.
func checkdead() {
	assertLockHeld(&sched.lock)

	// In -buildmode=c-shared/c-archive the host process may call into
	// Go sporadically, so an apparently idle runtime is not a
	// deadlock (except on wasm, where there is no host thread model
	// like that).
	if (islibrary || isarchive) && GOARCH != "wasm" {
		return
	}

	// During panicking the goroutine counts are in flux; don't
	// second-guess them.
	if panicking.Load() > 0 {
		return
	}

	// With cgo's extra M mechanism, one M may be legitimately
	// running callbacks even though no Go goroutine is runnable.
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return // some M is doing real work
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// No M is running Go code. Scan all goroutines: any runnable or
	// running goroutine at this point indicates corrupted state.
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			// Blocked user goroutines; these are what make the
			// process "alive but stuck".
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	if grunning == 0 {
		// Possible only if main called runtime.Goexit.
		unlock(&sched.lock)
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Under faketime (playground), jump the clock to the next timer
	// and wake an M to run it instead of declaring deadlock.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// Wake it as a spinning M so it looks for the timer work.
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// A pending timer will eventually wake something up; not dead.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock)
	fatal("all goroutines are asleep - deadlock!")
}
6500
6501
6502
6503
6504
6505
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If this long passes without a GC, sysmon forces one.
// It is a variable (not a const) so tests can change it.
var forcegcperiod int64 = 2 * 60 * 1e9

// haveSysmon reports whether this platform runs the sysmon background
// thread. wasm has no threads, so no sysmon.
const haveSysmon = GOARCH != "wasm"
6512
6513
6514
6515
// sysmon is the system monitor: a background loop, running without a P
// on a dedicated M, that polls the network, retakes Ps blocked in
// syscalls or running too long, forces GCs, and drives other periodic
// runtime work. It never returns.
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	// Our M now counts as a system M; re-check deadlock with the
	// updated count.
	checkdead()
	unlock(&sched.lock)

	lastgomaxprocs := int64(0)
	lasttrace := int64(0)
	idle := 0           // consecutive iterations that found nothing to do
	delay := uint32(0)  // sleep between iterations, in microseconds

	for {
		// Back off from 20us up to 10ms as the loop stays idle.
		if idle == 0 {
			delay = 20
		} else if idle > 50 {
			delay *= 2
		}
		if delay > 10*1000 {
			delay = 10 * 1000
		}
		usleep(delay)

		// If the scheduler is quiescent (GC pending or all Ps idle)
		// and no schedtrace output is wanted, park until the next
		// timer instead of busy-polling. Double-checked under
		// sched.lock.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)

					// Sleep until the next timer, capped at half
					// the forced-GC period so we never miss one.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					// For long sleeps, tell the OS it may relax
					// timer precision (saves power, e.g. Windows).
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					// Woken explicitly (e.g. syscall exit): go
					// back to fast polling.
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		// sysmonlock serializes the body with anyone who needs sysmon
		// quiesced.
		lock(&sched.sysmonlock)

		// Re-read the time; we may have slept a long while above.
		now = nanotime()

		// Give a cgo-aware runtime (e.g. TSAN) a chance to run.
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}

		// Poll the network if no one else has for over 10ms.
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking
			if !list.empty() {
				// Pretend one fewer M is idle while injecting,
				// so checkdead doesn't fire spuriously.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}

		// Re-evaluate the default GOMAXPROCS about once a second.
		if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
			sysmonUpdateGOMAXPROCS()
			lastgomaxprocs = now
		}
		if scavenger.sysmonWake.Load() != 0 {
			// The scavenger asked to be woken.
			scavenger.wake()
		}

		// Retake Ps blocked in syscalls or preempt long-running Gs.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}

		// Force a GC if none has run for forcegcperiod.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		// Periodic scheduler trace, if requested via GODEBUG.
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
6648
// sysmontick is retake's per-P snapshot: the last observed scheduler
// and syscall tick values and when each was first observed, used to
// detect Ps stuck in one state.
type sysmontick struct {
	schedtick   uint32 // last observed p.schedtick
	syscalltick uint32 // last observed p.syscalltick
	schedwhen   int64  // time schedtick was first observed at this value
	syscallwhen int64  // time syscalltick was first observed at this value
}
6655
6656
6657
// forcePreemptNS is the time slice (10ms) a goroutine may run before
// retake requests its preemption.
const forcePreemptNS = 10 * 1000 * 1000
6659
// retake preempts goroutines that have run too long and retakes Ps
// whose threads are blocked in syscalls. Called by sysmon; returns the
// number of Ps taken from syscalls.
func retake(now int64) uint32 {
	n := 0
	// allpLock prevents allp from changing underneath us, but we drop
	// and re-acquire it inside the loop, so indices must be
	// revalidated each iteration (len(allp) is re-read).
	lock(&allpLock)
	for i := 0; i < len(allp); i++ {
		pp := allp[i]
		if pp == nil || atomic.Load(&pp.status) != _Prunning {
			// Not running anything (or slot not yet initialized
			// during a resize); nothing to retake.
			continue
		}
		pd := &pp.sysmontick
		sysretake := false

		// If the P has been running the same goroutine for more than
		// forcePreemptNS, request preemption.
		schedt := int64(pp.schedtick)
		if int64(pd.schedtick) != schedt {
			// New scheduling event since we last looked; restart
			// the clock.
			pd.schedtick = uint32(schedt)
			pd.schedwhen = now
		} else if pd.schedwhen+forcePreemptNS <= now {
			preemptone(pp)
			// Also consider taking the P if it turns out to be in
			// a syscall, regardless of the syscall-tick bookkeeping.
			sysretake = true
		}

		// The syscall path below takes other locks; drop allpLock.
		unlock(&allpLock)

		// Take a fake idle-locked-M reference so checkdead doesn't
		// trigger while we juggle P ownership.
		incidlelocked(-1)

		// Try to pin the P's thread at syscall exit so we can examine
		// and possibly steal the P safely.
		thread, ok := setBlockOnExitSyscall(pp)
		if !ok {
			// Not in a syscall (or state changed under us).
			goto done
		}

		// If this is a fresh syscall, just note when it started and
		// let it run a while.
		if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
			pd.syscalltick = uint32(syst)
			pd.syscallwhen = now
			thread.resume()
			goto done
		}

		// Leave the P alone if it has no local work, there are other
		// spinning/idle Ps to absorb work, and the syscall is recent
		// (<10ms).
		if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
			thread.resume()
			goto done
		}

		// Steal the P from the syscalling thread and hand it off so
		// its runnable work can proceed.
		thread.takeP()
		thread.resume()
		n++

		handoffp(pp)

	done:
		incidlelocked(1)
		lock(&allpLock)
	}
	unlock(&allpLock)
	return uint32(n)
}
6756
6757
6758
// syscallingThread describes a thread pinned at syscall exit by
// setBlockOnExitSyscall: the goroutine in the syscall, its M, the P it
// holds, and the goroutine's status before the _Gscan bit was set.
type syscallingThread struct {
	gp     *g
	mp     *m
	pp     *p
	status uint32 // gp's status without _Gscan; restored by resume
}
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
// setBlockOnExitSyscall attempts to pin pp's thread in its syscall by
// setting the _Gscan bit on its goroutine, which blocks the goroutine
// from completing syscall exit. On success the caller must eventually
// call resume() on the returned syscallingThread. Returns false if pp's
// thread is not currently in a syscall (all reads here are racy and
// revalidated after the scan bit is taken).
func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
	if pp.status != _Prunning {
		return syscallingThread{}, false
	}

	// Racy reads: any of these can change until castogscanstatus
	// succeeds, after which we re-verify the m/p linkage below.
	mp := pp.m.ptr()
	if mp == nil {
		return syscallingThread{}, false
	}
	gp := mp.curg
	if gp == nil {
		return syscallingThread{}, false
	}
	status := readgstatus(gp) &^ _Gscan

	// Only goroutines actually sitting in a syscall (or dead extra-M
	// goroutines) can be pinned this way.
	if status != _Gsyscall && status != _Gdeadextra {
		return syscallingThread{}, false
	}
	if !castogscanstatus(gp, status, status|_Gscan) {
		// Lost the race; someone else changed the status.
		return syscallingThread{}, false
	}
	if gp.m != mp || gp.m.p.ptr() != pp {
		// The snapshot above was stale; undo the scan bit.
		casfrom_Gscanstatus(gp, status|_Gscan, status)
		return syscallingThread{}, false
	}
	return syscallingThread{gp, mp, pp, status}, true
}
6825
6826
6827
6828
6829
// gcstopP releases the pinned thread's P into _Pgcstop on behalf of a
// stop-the-world, updating the STW accounting. sched.lock must be held.
func (s syscallingThread) gcstopP() {
	assertLockHeld(&sched.lock)

	s.releaseP(_Pgcstop)
	s.pp.gcStopTime = nanotime()
	sched.stopwait--
}
6837
6838
6839
// takeP steals the pinned thread's P, leaving it _Pidle for the caller
// to hand off or reuse.
func (s syscallingThread) takeP() {
	s.releaseP(_Pidle)
}
6843
6844
6845
6846
// releaseP detaches the P from the pinned thread and publishes it in
// the given state (_Pidle or _Pgcstop), emitting a ProcSteal trace
// event and bumping syscalltick so the thread notices on syscall exit.
func (s syscallingThread) releaseP(state uint32) {
	if state != _Pidle && state != _Pgcstop {
		throw("attempted to release P into a bad state")
	}
	trace := traceAcquire()
	s.pp.m = 0
	s.mp.p = 0
	// The status store publishes the new state to concurrent readers.
	atomic.Store(&s.pp.status, state)
	if trace.ok() {
		trace.ProcSteal(s.pp)
		traceRelease(trace)
	}
	// Account for a goroutine now in a syscall without a P.
	addGSyscallNoP(s.mp)
	s.pp.syscalltick++
}
6862
6863
// resume clears the _Gscan bit set by setBlockOnExitSyscall, allowing
// the pinned thread to complete its syscall exit.
func (s syscallingThread) resume() {
	casfrom_Gscanstatus(s.gp, s.status|s.status&^s.status|_Gscan|s.status&^(s.status), s.status)
}
6867
6868
6869
6870
6871
6872
6873 func preemptall() bool {
6874 res := false
6875 for _, pp := range allp {
6876 if pp.status != _Prunning {
6877 continue
6878 }
6879 if preemptone(pp) {
6880 res = true
6881 }
6882 }
6883 return res
6884 }
6885
6886
6887
6888
6889
6890
6891
6892
6893
6894
6895
// preemptone requests that the goroutine currently running on pp stop.
// Best-effort: every read here is racy, so it may fail to inform the
// right goroutine, or "inform" one that has already moved on. Returns
// true if a request was issued.
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	if mp == nil || mp == getg().m {
		// No M, or it's us — can't/needn't preempt ourselves here.
		return false
	}
	gp := mp.curg
	if gp == nil || gp == mp.g0 {
		// Nothing user-level to preempt.
		return false
	}
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// In a syscall; retake handles that case instead.
		return false
	}

	gp.preempt = true

	// Poison the stack guard: every function prologue compares sp
	// against stackguard0, so the goroutine will trap into the
	// scheduler at its next function call.
	gp.stackguard0 = stackPreempt

	// If supported, also send an async preemption signal so the
	// goroutine stops even without making a function call.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}

	return true
}
6926
// starttime is the time of the first schedtrace call; traces print
// milliseconds relative to it.
var starttime int64

// schedtrace prints a one-line (or, if detailed, multi-line) summary of
// scheduler state to stderr. Driven by GODEBUG=schedtrace/scheddetail.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}

	// Per-P state. In compact mode, print local run-queue lengths as
	// a bracketed vector; in detailed mode, one line per P.
	for i, pp := range allp {
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			mp := pp.m.ptr()
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			print(" ")
			if i == 0 {
				print("[ ")
			}
			print(t - h)
			if i == len(allp)-1 {
				print(" ]")
			}
		}
	}

	if !detailed {
		// Compact mode also shows per-P schedtick counters.
		print(" schedticks=[ ")
		for _, pp := range allp {
			print(pp.schedtick)
			print(" ")
		}
		print("]\n")
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// One line per M.
	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print(" M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	// One line per G.
	forEachG(func(gp *g) {
		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
7024
// updateMaxProcsGState is the shared state between sysmon (producer of
// new GOMAXPROCS values) and the goroutine that applies them.
type updateMaxProcsGState struct {
	lock mutex       // protects g and procs
	g    *g          // the updateMaxProcsGoroutine goroutine
	idle atomic.Bool // true while the goroutine is parked waiting for work

	// procs is the new GOMAXPROCS value to apply; written by sysmon
	// before waking the goroutine.
	procs int32
}
7033
var (
	// updatemaxprocs counts non-default uses of the updatemaxprocs
	// GODEBUG setting for runtime/metrics.
	updatemaxprocs = &godebugInc{name: "updatemaxprocs"}

	// updateMaxProcsG coordinates sysmon with the goroutine that
	// applies automatic GOMAXPROCS updates (see
	// updateMaxProcsGoroutine and sysmonUpdateGOMAXPROCS).
	updateMaxProcsG updateMaxProcsGState

	// computeMaxProcsLock serializes computation of the default
	// GOMAXPROCS value (defaultGOMAXPROCS), which may be expensive.
	computeMaxProcsLock mutex
)
7092
7093
7094
7095
// defaultGOMAXPROCSUpdateEnable starts the background goroutine that
// keeps GOMAXPROCS in sync with its computed default, unless the
// updatemaxprocs GODEBUG setting disabled the feature (in which case
// the opt-out is recorded for metrics and nothing is started).
func defaultGOMAXPROCSUpdateEnable() {
	if debug.updatemaxprocs == 0 {
		// Feature disabled via GODEBUG; count the non-default
		// setting and skip starting the updater.
		updatemaxprocs.IncNonDefault()
		return
	}

	go updateMaxProcsGoroutine()
}
7115
// updateMaxProcsGoroutine applies GOMAXPROCS updates computed by
// sysmon. It parks until sysmonUpdateGOMAXPROCS wakes it with a new
// value, then stops the world and installs it. It exits permanently if
// the user has set GOMAXPROCS explicitly (customGOMAXPROCS).
func updateMaxProcsGoroutine() {
	updateMaxProcsG.g = getg()
	lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
	for {
		lock(&updateMaxProcsG.lock)
		if updateMaxProcsG.idle.Load() {
			// We are the only ones allowed to set idle=true.
			throw("updateMaxProcsGoroutine: phase error")
		}
		updateMaxProcsG.idle.Store(true)
		// Park until sysmon posts a new procs value.
		goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)

		stw := stopTheWorldGC(stwGOMAXPROCS)

		// If the user called runtime.GOMAXPROCS in the meantime,
		// automatic updates are permanently disabled.
		lock(&sched.lock)
		custom := sched.customGOMAXPROCS
		unlock(&sched.lock)
		if custom {
			startTheWorldGC(stw)
			return
		}

		// newprocs is consumed by startTheWorldGC's world restart,
		// which performs the actual procresize.
		newprocs = updateMaxProcsG.procs
		lock(&sched.lock)
		sched.customGOMAXPROCS = false
		unlock(&sched.lock)

		startTheWorldGC(stw)
	}
}
7151
// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if it
// differs from the current value, wakes updateMaxProcsGoroutine to
// apply it. Called from sysmon about once per second.
func sysmonUpdateGOMAXPROCS() {
	// Serialize the (possibly expensive) default computation.
	lock(&computeMaxProcsLock)

	// If the user set GOMAXPROCS explicitly, automatic updates are off.
	lock(&sched.lock)
	custom := sched.customGOMAXPROCS
	curr := gomaxprocs
	unlock(&sched.lock)
	if custom {
		unlock(&computeMaxProcsLock)
		return
	}

	procs := defaultGOMAXPROCS(0)
	unlock(&computeMaxProcsLock)
	if procs == curr {
		// Nothing to do.
		return
	}

	// Hand the new value to the updater goroutine if it is parked.
	// If it is busy (idle==false), we simply retry on a later sysmon
	// tick.
	if updateMaxProcsG.idle.Load() {
		lock(&updateMaxProcsG.lock)
		updateMaxProcsG.procs = procs
		updateMaxProcsG.idle.Store(false)
		var list gList
		list.push(updateMaxProcsG.g)
		injectglist(&list)
		unlock(&updateMaxProcsG.lock)
	}
}
7187
7188
7189
7190
7191
7192
// schedEnableUser enables or disables scheduling of user goroutines
// (system goroutines are unaffected). On re-enable, the goroutines that
// accumulated while disabled are moved to the global run queue and idle
// Ps are kicked to pick them up.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		n := sched.disable.runnable.size
		globrunqputbatch(&sched.disable.runnable)
		unlock(&sched.lock)
		// Start Ms for the released work, bounded by idle Ps.
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
7211
7212
7213
7214
7215
7216 func schedEnabled(gp *g) bool {
7217 assertLockHeld(&sched.lock)
7218
7219 if sched.disable.user {
7220 return isSystemGoroutine(gp, true)
7221 }
7222 return true
7223 }
7224
7225
7226
7227
7228
7229
// mput puts mp on the idle M list. Going idle may complete a deadlock,
// so re-check. sched.lock must be held.
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	sched.midle.push(unsafe.Pointer(mp))
	sched.nmidle++
	checkdead()
}
7237
7238
7239
7240
7241
7242
// mget pops an M from the idle M list, or returns nil if none is
// available. sched.lock must be held.
func mget() *m {
	assertLockHeld(&sched.lock)

	mp := (*m)(sched.midle.pop())
	if mp != nil {
		sched.nmidle--
	}
	return mp
}
7252
7253
7254
7255
7256
7257
7258
7259
// mgetSpecific removes the specific M mp from the idle M list, or
// returns nil if mp is not currently on it. sched.lock must be held.
func mgetSpecific(mp *m) *m {
	assertLockHeld(&sched.lock)

	// Zero list links mean mp is not on the idle list.
	if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
		return nil
	}

	sched.midle.remove(unsafe.Pointer(mp))
	sched.nmidle--

	return mp
}
7273
7274
7275
7276
7277
7278
// globrunqput puts gp at the tail of the global runnable queue.
// sched.lock must be held.
func globrunqput(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBack(gp)
}
7284
7285
7286
7287
7288
7289
// globrunqputhead puts gp at the head of the global runnable queue.
// sched.lock must be held.
func globrunqputhead(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.push(gp)
}
7295
7296
7297
7298
7299
7300
7301
// globrunqputbatch appends all goroutines in batch to the tail of the
// global runnable queue and empties batch. sched.lock must be held.
func globrunqputbatch(batch *gQueue) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBackAll(*batch)
	*batch = gQueue{}
}
7308
7309
7310
7311 func globrunqget() *g {
7312 assertLockHeld(&sched.lock)
7313
7314 if sched.runq.size == 0 {
7315 return nil
7316 }
7317
7318 return sched.runq.pop()
7319 }
7320
7321
7322
// globrunqgetbatch takes up to n goroutines from the head of the global
// runnable queue: one returned directly as gp and the rest in q. The
// actual count is capped at a fair per-P share (size/gomaxprocs+1).
// sched.lock must be held.
func globrunqgetbatch(n int32) (gp *g, q gQueue) {
	assertLockHeld(&sched.lock)

	if sched.runq.size == 0 {
		return
	}

	// Don't let one P drain the whole queue.
	n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)

	gp = sched.runq.pop()
	n--

	for ; n > 0; n-- {
		gp1 := sched.runq.pop()
		q.pushBack(gp1)
	}
	return
}
7341
7342
7343 type pMask []uint32
7344
7345
7346 func (p pMask) read(id uint32) bool {
7347 word := id / 32
7348 mask := uint32(1) << (id % 32)
7349 return (atomic.Load(&p[word]) & mask) != 0
7350 }
7351
7352
7353 func (p pMask) set(id int32) {
7354 word := id / 32
7355 mask := uint32(1) << (id % 32)
7356 atomic.Or(&p[word], mask)
7357 }
7358
7359
7360 func (p pMask) clear(id int32) {
7361 word := id / 32
7362 mask := uint32(1) << (id % 32)
7363 atomic.And(&p[word], ^mask)
7364 }
7365
7366
7367 func (p pMask) any() bool {
7368 for i := range p {
7369 if atomic.Load(&p[i]) != 0 {
7370 return true
7371 }
7372 }
7373 return false
7374 }
7375
7376
7377
7378
7379
7380 func (p pMask) resize(nprocs int32) pMask {
7381 maskWords := (nprocs + 31) / 32
7382
7383 if maskWords <= int32(cap(p)) {
7384 return p[:maskWords]
7385 }
7386 newMask := make([]uint32, maskWords)
7387
7388 copy(newMask, p)
7389 return newMask
7390 }
7391
7392
7393
7394
7395
7396
7397
7398
7399
7400
7401
7402
// pidleput puts pp on the idle P list and updates the idle/timer masks
// and the CPU limiter's idle accounting. now is the current time, or 0
// to have pidleput read the clock; the time used is returned.
// sched.lock must be held; pp must have an empty run queue.
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// A P with no timers need not be consulted by timer stealing.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	// Track idle time for the GC CPU limiter.
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
7424
7425
7426
7427
7428
7429
7430
7431
// pidleget pops a P from the idle P list, undoing pidleput's mask and
// limiter bookkeeping. now is the current time, or 0 to read the clock
// lazily (only if a P was actually found). Returns the P (or nil) and
// the time used. sched.lock must be held.
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		if now == 0 {
			now = nanotime()
		}
		// Conservatively assume the P may acquire timers again.
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
7449
7450
7451
7452
7453
7454
7455
7456
7457
7458
7459
// pidlegetSpinning is pidleget for callers about to start a spinning M.
// If no idle P is available it sets sched.needspinning, so that the
// next P to become free spins up an M itself. sched.lock must be held.
func pidlegetSpinning(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp, now := pidleget(now)
	if pp == nil {
		// Record the unsatisfied demand for a spinning M.
		sched.needspinning.Store(1)
		return nil, now
	}

	return pp, now
}
7474
7475
7476
// runqempty reports whether pp's local run queue (including runnext) is
// empty. The head/runnext/tail snapshot must be consistent, so tail is
// re-read and the whole read retried if it moved — otherwise a
// concurrent runqput could make us miss a g that was present the whole
// time.
func runqempty(pp *p) bool {
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			// Snapshot was consistent.
			return head == tail && runnext == 0
		}
	}
}
7491
7492
7493
7494
7495
7496
7497
7498
7499
7500
// randomizeScheduler, when true, perturbs scheduling order (runnext
// placement, batch shuffles) to flush out order-dependent bugs. Enabled
// under the race detector.
const randomizeScheduler = raceenabled
7502
7503
7504
7505
7506
7507
// runqput puts gp on pp's local runnable queue. If next is true, gp
// goes into the runnext slot (to run immediately after the current g);
// otherwise it goes to the tail. If the local queue is full, half of it
// (plus gp) overflows to the global queue. Only pp's owner M may call
// this (single producer).
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// Without sysmon there is no preemption watchdog, so the
		// runnext fast path could let two goroutines ping-pong
		// forever and starve the rest of the queue. Disable it.
		next = false
	}
	if randomizeScheduler && next && randn(2) == 0 {
		// Randomly skip the runnext slot to shake out ordering bugs.
		next = false
	}

	if next {
	retryNext:
		// Install gp as runnext; the previous occupant (if any) is
		// demoted to the regular queue below.
		oldnext := pp.runnext
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		gp = oldnext.ptr()
	}

retry:
	// load-acquire: synchronize with consumers (stealers).
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		// store-release: make the slot visible before the new tail.
		atomic.StoreRel(&pp.runqtail, t+1)
		return
	}
	// Queue full: move half to the global queue. Can fail if a
	// stealer raced us; then the queue has room, so retry.
	if runqputslow(pp, gp, h, t) {
		return
	}

	goto retry
}
7551
7552
7553
// runqputslow handles runqput's overflow case: it moves half of pp's
// local queue plus gp to the global queue as one batch. Returns false
// if a concurrent stealer changed the head first (caller retries).
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// Grab the first half of the queue.
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		// Only called when the queue is full; anything else means
		// the caller's snapshot was bogus.
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	// cas-release: commits our claim on the consumed slots.
	if !atomic.CasRel(&pp.runqhead, h, h+n) {
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		// Fisher-Yates shuffle to perturb ordering under race mode.
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the batch into a list for a single global-queue insertion.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}

	q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}

	lock(&sched.lock)
	globrunqputbatch(&q)
	unlock(&sched.lock)
	return true
}
7591
7592
7593
7594
// runqputbatch moves as many goroutines as fit from q onto the tail of
// pp's local run queue; any that don't fit remain in q. Only pp's owner
// M may call this.
func runqputbatch(pp *p, q *gQueue) {
	if q.empty() {
		return
	}
	// load-acquire: synchronize with consumers.
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	n := uint32(0)
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}

	if randomizeScheduler {
		// Shuffle only the slots we just wrote (not yet published,
		// so plain swaps are safe).
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	// store-release: publish the new entries.
	atomic.StoreRel(&pp.runqtail, t)

	return
}
7623
7624
7625
7626
7627
// runqget takes the next runnable goroutine from pp's local queue,
// preferring the runnext slot. inheritTime is true for a runnext g,
// which runs in the current time slice rather than starting a new one.
// Only pp's owner M may call this.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// The cas can fail because a stealer may take runnext, but never
	// another runqget (we are the only owner), so if next is non-zero
	// it cannot change to a different non-zero value under us.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // synchronize with other consumers
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		// cas-release: commits our consumption of the slot.
		if atomic.CasRel(&pp.runqhead, h, h+1) {
			return gp, false
		}
	}
}
7650
7651
7652
// runqdrain removes all goroutines from pp's local runnable queue
// (including runnext) and returns them as a gQueue in queue order.
// Only pp's owner M may call this.
func runqdrain(pp *p) (drainQ gQueue) {
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) {
		// Inconsistent snapshot (head read before a racing update);
		// read again.
		goto retry
	}

	// Claim all qn slots at once; retry if a stealer got some first.
	if !atomic.CasRel(&pp.runqhead, h, h+qn) {
		goto retry
	}

	// Reading the slots after the CAS is safe: once head is advanced,
	// only we reference these entries — the producer (also us, the
	// owner M) won't overwrite them until we return.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
	}
	return
}
7687
7688
7689
7690
7691
// runqgrab steals up to half of pp's local run queue into batch
// (starting at batchHead, modulo len(batch)) and returns the number of
// goroutines grabbed. If the queue is empty and stealRunNextG is set,
// it may steal pp.runnext instead. Called by stealing Ms, so everything
// here must tolerate concurrent producers/consumers.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // synchronize with the producer
		n := t - h
		n = n - n/2 // take the larger half (rounds up)
		if n == 0 {
			if stealRunNextG {
				// Queue empty; try the runnext slot.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						if mp := pp.m.ptr(); mp != nil {
							if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
								// pp is running (not in a syscall):
								// it is likely about to pick up its
								// own runnext. Sleep a moment to
								// give it the chance, so we don't
								// steal work that's about to run
								// locally. 3us is enough on OSes
								// with high-resolution timers; on
								// low-res OSes (where a sleep would
								// be ~1ms) just yield instead.
								if !osHasLowResTimer {
									usleep(3)
								} else {
									osyield()
								}
							}
						}
					}
					if !pp.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) {
			// Inconsistent h/t snapshot; read again.
			continue
		}
		// Copy optimistically; the CAS below validates the claim.
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		// cas-release: commits our consumption of the slots.
		if atomic.CasRel(&pp.runqhead, h, h+n) {
			return n
		}
	}
}
7756
7757
7758
7759
// runqsteal steals half of the elements from the local runnable queue
// of p2 and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if the steal failed).
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	// Grab directly into pp's own ring buffer, starting at its tail.
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	// Return the last stolen G to the caller instead of queuing it.
	n--
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		// Only one G was stolen; nothing to publish on the queue.
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		// The grabbed batch cannot exceed the free space in pp.runq;
		// if it does, the queue state is corrupt.
		throw("runqsteal: runq overflow")
	}
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the items available for consumption
	return gp
}
7778
7779
7780
// A gQueue is a FIFO queue of Gs linked through g.schedlink.
// Since schedlink is the single link field, a G can be on at most one
// gQueue (or gList) at a time.
type gQueue struct {
	head guintptr // first G in the queue, 0 if empty
	tail guintptr // last G in the queue, 0 if empty
	size int32    // number of Gs in the queue
}
7786
7787
// empty reports whether q is empty. An empty queue has head == 0.
func (q *gQueue) empty() bool {
	return q.head == 0
}
7791
7792
7793 func (q *gQueue) push(gp *g) {
7794 gp.schedlink = q.head
7795 q.head.set(gp)
7796 if q.tail == 0 {
7797 q.tail.set(gp)
7798 }
7799 q.size++
7800 }
7801
7802
7803 func (q *gQueue) pushBack(gp *g) {
7804 gp.schedlink = 0
7805 if q.tail != 0 {
7806 q.tail.ptr().schedlink.set(gp)
7807 } else {
7808 q.head.set(gp)
7809 }
7810 q.tail.set(gp)
7811 q.size++
7812 }
7813
7814
7815
7816 func (q *gQueue) pushBackAll(q2 gQueue) {
7817 if q2.tail == 0 {
7818 return
7819 }
7820 q2.tail.ptr().schedlink = 0
7821 if q.tail != 0 {
7822 q.tail.ptr().schedlink = q2.head
7823 } else {
7824 q.head = q2.head
7825 }
7826 q.tail = q2.tail
7827 q.size += q2.size
7828 }
7829
7830
7831
7832 func (q *gQueue) pop() *g {
7833 gp := q.head.ptr()
7834 if gp != nil {
7835 q.head = gp.schedlink
7836 if q.head == 0 {
7837 q.tail = 0
7838 }
7839 q.size--
7840 }
7841 return gp
7842 }
7843
7844
7845 func (q *gQueue) popList() gList {
7846 stack := gList{q.head, q.size}
7847 *q = gQueue{}
7848 return stack
7849 }
7850
7851
7852
// A gList is a singly-linked stack of Gs linked through g.schedlink.
// Since schedlink is the single link field, a G can be on at most one
// gList (or gQueue) at a time.
type gList struct {
	head guintptr // top of the list, 0 if empty
	size int32    // number of Gs in the list
}
7857
7858
// empty reports whether l is empty. An empty list has head == 0.
func (l *gList) empty() bool {
	return l.head == 0
}
7862
7863
// push prepends gp to l.
func (l *gList) push(gp *g) {
	// Link gp to the old head before publishing it as the new head.
	gp.schedlink = l.head
	l.head.set(gp)
	l.size++
}
7869
7870
7871 func (l *gList) pushAll(q gQueue) {
7872 if !q.empty() {
7873 q.tail.ptr().schedlink = l.head
7874 l.head = q.head
7875 l.size += q.size
7876 }
7877 }
7878
7879
7880 func (l *gList) pop() *g {
7881 gp := l.head.ptr()
7882 if gp != nil {
7883 l.head = gp.schedlink
7884 l.size--
7885 }
7886 return gp
7887 }
7888
7889
7890 func setMaxThreads(in int) (out int) {
7891 lock(&sched.lock)
7892 out = int(sched.maxmcount)
7893 if in > 0x7fffffff {
7894 sched.maxmcount = 0x7fffffff
7895 } else {
7896 sched.maxmcount = int32(in)
7897 }
7898 checkmcount()
7899 unlock(&sched.lock)
7900 return
7901 }
7902
7903
7904
7905
7906
7907
7908
7909
7910
7911
7912
7913
7914
7915 func procPin() int {
7916 gp := getg()
7917 mp := gp.m
7918
7919 mp.locks++
7920 return int(mp.p.ptr().id)
7921 }
7922
7923
7924
7925
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935 func procUnpin() {
7936 gp := getg()
7937 gp.m.locks--
7938 }
7939
7940
7941
// sync_runtime_procPin delegates to procPin. It exists as a separate
// symbol for package sync (presumably wired up via go:linkname; the
// directive is not visible in this view).
func sync_runtime_procPin() int {
	return procPin()
}
7945
7946
7947
// sync_runtime_procUnpin delegates to procUnpin. It exists as a
// separate symbol for package sync (presumably wired up via
// go:linkname; the directive is not visible in this view).
func sync_runtime_procUnpin() {
	procUnpin()
}
7951
7952
7953
// sync_atomic_runtime_procPin delegates to procPin. It exists as a
// separate symbol for package sync/atomic (presumably wired up via
// go:linkname; the directive is not visible in this view).
func sync_atomic_runtime_procPin() int {
	return procPin()
}
7957
7958
7959
// sync_atomic_runtime_procUnpin delegates to procUnpin. It exists as a
// separate symbol for package sync/atomic (presumably wired up via
// go:linkname; the directive is not visible in this view).
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}
7963
7964
7965
7966
7967
// internal_sync_runtime_canSpin reports whether it is sensible for the
// caller (a sync-package lock, presumably) to keep spinning at spin
// iteration i. Spinning is refused when any of these hold:
//   - we have already spun active_spin times;
//   - the machine started with at most one CPU (spinning can't help);
//   - GOMAXPROCS <= 1 + idle Ps + spinning Ms, i.e. there is unlikely
//     to be another *running* P that could release the lock soon;
//   - the current P's local run queue is non-empty, so spinning would
//     delay runnable work.
func internal_sync_runtime_canSpin(i int) bool {
	if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}
7982
7983
7984
// internal_sync_runtime_doSpin performs one round of busy-waiting by
// executing active_spin_cnt processor yields (procyield).
func internal_sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
7988
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
8002
// sync_runtime_canSpin delegates to internal_sync_runtime_canSpin.
// It exists as a separate symbol for package sync (presumably wired up
// via go:linkname; the directive is not visible in this view).
func sync_runtime_canSpin(i int) bool {
	return internal_sync_runtime_canSpin(i)
}
8006
8007
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018
// sync_runtime_doSpin delegates to internal_sync_runtime_doSpin.
// It exists as a separate symbol for package sync (presumably wired up
// via go:linkname; the directive is not visible in this view).
func sync_runtime_doSpin() {
	internal_sync_runtime_doSpin()
}
8022
8023 var stealOrder randomOrder
8024
8025
8026
8027
8028
// randomOrder, together with randomEnum, enumerates the integers
// [0, count) in a variety of pseudorandom orders. Each order starts at
// a random position and steps by a random coprime of count, which
// visits every position exactly once without repetition.
type randomOrder struct {
	count    uint32   // size of the set being enumerated
	coprimes []uint32 // all values in [1, count] coprime with count
}
8033
// A randomEnum is one in-progress enumeration produced by
// randomOrder.start.
type randomEnum struct {
	i     uint32 // number of positions visited so far
	count uint32 // total number of positions to visit
	pos   uint32 // current position in [0, count)
	inc   uint32 // step size; coprime with count, so all positions are visited
}
8040
8041 func (ord *randomOrder) reset(count uint32) {
8042 ord.count = count
8043 ord.coprimes = ord.coprimes[:0]
8044 for i := uint32(1); i <= count; i++ {
8045 if gcd(i, count) == 1 {
8046 ord.coprimes = append(ord.coprimes, i)
8047 }
8048 }
8049 }
8050
// start begins a new enumeration seeded by i: i selects both the
// starting position and (via i/count) the step size.
// Requires ord.count > 0 and len(ord.coprimes) > 0 (i.e. reset was
// called with a non-zero count); otherwise the divisions here divide
// by zero.
func (ord *randomOrder) start(i uint32) randomEnum {
	return randomEnum{
		count: ord.count,
		pos:   i % ord.count,
		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
	}
}
8058
// done reports whether the enumeration has visited all count positions.
func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}
8062
// next advances the enumeration to the following position, stepping by
// inc modulo count.
func (enum *randomEnum) next() {
	enum.i++
	enum.pos = (enum.pos + enum.inc) % enum.count
}
8067
// position returns the current position of the enumeration.
func (enum *randomEnum) position() uint32 {
	return enum.pos
}
8071
// gcd returns the greatest common divisor of a and b, computed by the
// Euclidean algorithm. gcd(a, 0) == a, so gcd(0, 0) == 0.
func gcd(a, b uint32) uint32 {
	if b == 0 {
		return a
	}
	return gcd(b, a%b)
}
8078
8079
8080
// An initTask represents the set of package init functions that need
// to be run for one linker-generated init task. Immediately after the
// two uint32 fields (8 bytes — see the add(unsafe.Pointer(t), 8) in
// doInit1) the linker lays out nfns pointer-sized entries, one per
// init function to call.
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done (per doInit1)
	nfns  uint32 // number of function entries following this header
	// followed by nfns pointer-sized function addresses
}
8086
8087
8088
8089 var inittrace tracestat
8090
// tracestat is a snapshot of init-tracing state and statistics; doInit1
// diffs two snapshots to report per-package allocation counts.
type tracestat struct {
	active bool   // whether init tracing is enabled
	id     uint64 // goid of the init goroutine (set in runtime.main)
	allocs uint64 // cumulative heap allocation count
	bytes  uint64 // cumulative heap bytes allocated
}
8097
8098 func doInit(ts []*initTask) {
8099 for _, t := range ts {
8100 doInit1(t)
8101 }
8102 }
8103
// doInit1 runs a single init task t exactly once: it calls each of the
// t.nfns init functions laid out in memory after the initTask header,
// and optionally reports timing/allocation statistics when init
// tracing is active.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized; nothing to do
		return
	case 1: // initialization already in progress on this goroutine
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // mark in progress to catch recursion

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Snapshot stats non-atomically; inittrace is only updated
			// by the init goroutine, so there is no race here.
			before = inittrace
		}

		if t.nfns == 0 {
			// Tasks with no functions should have been pruned by the
			// linker; seeing one indicates linker skew.
			throw("inittask with no functions")
		}

		// The function addresses start 8 bytes into the task, right
		// after the two uint32 header fields, one pointer-sized entry
		// each.
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			// Reinterpret the stored address as a func() value and call it.
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Second snapshot; safe for the same reason as above.
			after := inittrace

			// Derive the package path from the first init function's PC.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
8156
View as plain text