Source file
src/runtime/runtime1.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/bytealg"
10 "internal/goarch"
11 "internal/runtime/atomic"
12 "internal/strconv"
13 "unsafe"
14 )
15
16
17
18
19
20
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota // number of flag bits below the level field
)

// traceback_cache is the packed current setting; traceback_env holds the
// bits forced by the GOTRACEBACK environment variable (see setTraceback).
var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
29
30
31
32
33
34
35
36
37
38
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print stacks for all goroutines, not just the current one.
// If crash is set, crash (core dump, etc) after tracebacking.
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing > throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		// An explicit per-M setting overrides the cached global level.
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Runtime-internal throws always include runtime frames.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
55
// argc and argv hold the C-level command line, captured by args at
// process startup and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
60
61
62
63
// argv_index returns the i'th pointer in the C argv/envp vector argv.
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
67
// args records the C-level argc/argv for later use by goargs and
// goenvs_unix, then gives the OS-specific sysargs hook a chance to
// examine them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
73
74 func goargs() {
75 if GOOS == "windows" {
76 return
77 }
78 argslice = make([]string, argc)
79 for i := int32(0); i < argc; i++ {
80 argslice[i] = gostringnocopy(argv_index(argv, i))
81 }
82 }
83
// goenvs_unix builds the Go-visible environment (envs) from the C
// environ block, which on Unix follows argv in memory: argv[argc] is the
// NULL that terminates argv, so the environment entries start at
// argv[argc+1] and run up to the next nil pointer.
func goenvs_unix() {
	// Count environment entries up to the nil terminator.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		// Uses gostring here (vs gostringnocopy in goargs) — presumably
		// to copy the strings out of the C block; confirm upstream.
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
98
// environ returns the process environment captured at startup (see
// goenvs_unix or the OS-specific equivalent that fills envs).
func environ() []string {
	return envs
}
102
103
104
// test_z64 and test_x64 are scratch words for testAtomic64. They are
// package-level rather than locals — NOTE(review): presumably to
// guarantee the 8-byte alignment 64-bit atomics need on 32-bit
// platforms; confirm.
var test_z64, test_x64 uint64
106
// testAtomic64 exercises the 64-bit atomic primitives (Cas64, Load64,
// Store64, Xadd64, Xchg64) at startup and throws if any misbehaves.
// The values above 1<<40 ensure the high 32 bits participate.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// CAS must fail when old (0) does not match the current value (42).
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// CAS must succeed when old matches, and must not modify old.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new value.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
143
144 func check() {
145 var (
146 a int8
147 b uint8
148 c int16
149 d uint16
150 e int32
151 f uint32
152 g int64
153 h uint64
154 i, i1 float32
155 j, j1 float64
156 k unsafe.Pointer
157 l *uint16
158 m [4]byte
159 )
160 type x1t struct {
161 x uint8
162 }
163 type y1t struct {
164 x1 x1t
165 y uint8
166 }
167 var x1 x1t
168 var y1 y1t
169
170 if unsafe.Sizeof(a) != 1 {
171 throw("bad a")
172 }
173 if unsafe.Sizeof(b) != 1 {
174 throw("bad b")
175 }
176 if unsafe.Sizeof(c) != 2 {
177 throw("bad c")
178 }
179 if unsafe.Sizeof(d) != 2 {
180 throw("bad d")
181 }
182 if unsafe.Sizeof(e) != 4 {
183 throw("bad e")
184 }
185 if unsafe.Sizeof(f) != 4 {
186 throw("bad f")
187 }
188 if unsafe.Sizeof(g) != 8 {
189 throw("bad g")
190 }
191 if unsafe.Sizeof(h) != 8 {
192 throw("bad h")
193 }
194 if unsafe.Sizeof(i) != 4 {
195 throw("bad i")
196 }
197 if unsafe.Sizeof(j) != 8 {
198 throw("bad j")
199 }
200 if unsafe.Sizeof(k) != goarch.PtrSize {
201 throw("bad k")
202 }
203 if unsafe.Sizeof(l) != goarch.PtrSize {
204 throw("bad l")
205 }
206 if unsafe.Sizeof(x1) != 1 {
207 throw("bad unsafe.Sizeof x1")
208 }
209 if unsafe.Offsetof(y1.y) != 1 {
210 throw("bad offsetof y1.y")
211 }
212 if unsafe.Sizeof(y1) != 2 {
213 throw("bad unsafe.Sizeof y1")
214 }
215
216 var z uint32
217 z = 1
218 if !atomic.Cas(&z, 1, 2) {
219 throw("cas1")
220 }
221 if z != 2 {
222 throw("cas2")
223 }
224
225 z = 4
226 if atomic.Cas(&z, 5, 6) {
227 throw("cas3")
228 }
229 if z != 4 {
230 throw("cas4")
231 }
232
233 z = 0xffffffff
234 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
235 throw("cas5")
236 }
237 if z != 0xfffffffe {
238 throw("cas6")
239 }
240
241 m = [4]byte{1, 1, 1, 1}
242 atomic.Or8(&m[1], 0xf0)
243 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
244 throw("atomicor8")
245 }
246
247 m = [4]byte{0xff, 0xff, 0xff, 0xff}
248 atomic.And8(&m[1], 0x1)
249 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
250 throw("atomicand8")
251 }
252
253 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
254 if j == j {
255 throw("float64nan")
256 }
257 if !(j != j) {
258 throw("float64nan1")
259 }
260
261 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
262 if j == j1 {
263 throw("float64nan2")
264 }
265 if !(j != j1) {
266 throw("float64nan3")
267 }
268
269 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
270 if i == i {
271 throw("float32nan")
272 }
273 if i == i {
274 throw("float32nan1")
275 }
276
277 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
278 if i == i1 {
279 throw("float32nan2")
280 }
281 if i == i1 {
282 throw("float32nan3")
283 }
284
285 testAtomic64()
286
287 if fixedStack != round2(fixedStack) {
288 throw("FixedStack is not power-of-2")
289 }
290
291 if !checkASM() {
292 throw("assembly checks failed")
293 }
294 }
295
// dbgVar describes one GODEBUG option. Exactly one of value or atomic is
// expected to be set: value-backed options can only be set at startup,
// while atomic-backed options may also change while the program runs
// (see parsegodebug and reparsedebugvars).
type dbgVar struct {
	name   string
	value  *int32        // settable only at startup
	atomic *atomic.Int32 // settable while the program runs
	def    int32         // nonzero default applied before parsing
}
302
303
304
305
306
// debug holds the variables parsed from the GODEBUG environment
// variable. Defaults are established in parseRuntimeDebugVars;
// memprofilerate is handled separately (see parsegodebug).
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	containermaxprocs        int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	updatemaxprocs           int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// malloc is a combined flag: parseRuntimeDebugVars sets it when any
	// of the three options below is nonzero, so the allocation path can
	// test a single bool.
	malloc          bool
	inittrace       int32
	sbrk            int32
	checkfinalizers int32

	// The remaining options use atomic.Int32 because they may be updated
	// after startup when $GODEBUG changes (see reparsedebugvars, which
	// resets unmentioned atomic options to zero).
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	asynctimerchan atomic.Int32

	tracebacklabels atomic.Int32
}
369
// dbgvars maps GODEBUG option names to the debug struct fields they
// control. A nonzero def is applied as the default before any parsing
// (see parseRuntimeDebugVars).
var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
	{name: "decoratemappings", value: &debug.decoratemappings, def: 1},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "checkfinalizers", value: &debug.checkfinalizers},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracebacklabels", atomic: &debug.tracebacklabels, def: 1},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
}
406
// parseRuntimeDebugVars initializes the debug struct from the given
// GODEBUG string. Settings are layered, later ones winning: hard-coded
// defaults, the dbgvars def fields, the build-time godebugDefault
// string, and finally the supplied godebug value.
func parseRuntimeDebugVars(godebug string) {
	// Defaults that are not expressible via a dbgVar def field.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// Linux defaults to madvdontneed=1 — NOTE(review): presumably so
		// released memory is reflected in RSS promptly (MADV_DONTNEED
		// rather than MADV_FREE); confirm rationale upstream.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	// Apply the nonzero defaults declared in dbgvars.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every dbgVar has either value or atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Build-time defaults first, then the caller's GODEBUG string, so
	// the caller's settings take precedence.
	parsegodebug(godebugDefault, nil)

	parsegodebug(godebug, nil)

	// Fold the malloc-path options into a single combined flag.
	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// Checkmark mode forces async preemption off — NOTE(review): the
	// original rationale comment is not visible here; confirm upstream.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}
}
462
// finishDebugVarsSetup publishes the process's $GODEBUG value via
// godebugEnv and applies the GOTRACEBACK environment setting, recording
// it in traceback_env so later setTraceback calls cannot clear bits the
// environment forced.
func finishDebugVarsSetup() {
	p := new(string)
	*p = gogetenv("GODEBUG")
	godebugEnv.Store(p)

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
471
472
473
// reparsedebugvars is called when the effective GODEBUG changes at run
// time. Only atomic-backed options can change after startup: the new
// string is applied first, then the build-time defaults for keys it did
// not mention (seen tracks which keys were handled), and any atomic
// option mentioned by neither source is reset to zero.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)

	parsegodebug(env, seen)

	parsegodebug(godebugDefault, seen)

	// Reset runtime-settable options that neither source mentioned.
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}
487
488
489
490
491
492
493
494
495
496
497
// parsegodebug parses a comma-separated GODEBUG string of key=value
// fields, updating the variables listed in dbgvars (plus the special
// memprofilerate).
//
// When seen == nil this is startup: the string is scanned left to right
// and later settings overwrite earlier ones. When seen != nil this is an
// incremental re-parse: the string is scanned right to left, every key
// handled is recorded in seen, and keys already in seen are skipped —
// so the rightmost setting of each key wins without disturbing settings
// applied by an earlier parsegodebug call with the same seen map.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Startup: take the next field from the front.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Incremental: take the last field from the back.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Malformed field without '='; ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			// Already handled during this right-to-left scan.
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate has its own exported int variable, which may
		// already hold a user-assigned value, so it is only honored at
		// startup rather than being listed in dbgvars.
		if seen == nil && key == "memprofilerate" {
			if n, err := strconv.Atoi(value); err == nil {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, err := strconv.ParseInt(value, 10, 32); err == nil {
						if seen == nil && v.value != nil {
							*v.value = int32(n)
						} else if v.atomic != nil {
							// After startup only atomic vars may change.
							v.atomic.Store(int32(n))
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
559
560
// setTraceback parses a GOTRACEBACK setting ("none", "single", "all",
// "system", "crash", "wer", or a decimal level) and stores the packed
// result in traceback_cache (see the tracebackCrash/tracebackAll/
// tracebackShift encoding above).
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			// Same as "crash" but also enables Windows Error Reporting.
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		// Unknown words behave like "all"; a decimal number additionally
		// sets the traceback level directly.
		t = tracebackAll
		if n, err := strconv.Atoi(level); err == nil && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// When Go is built as a library or archive, force the crash bit so
	// the hosting process can observe the failure (e.g. via core dump).
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never clear bits forced by the GOTRACEBACK environment variable.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
597
598
599
600
// acquirem pins the calling goroutine to its current M by incrementing
// m.locks and returns that M. Must be paired with releasem.
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
606
607
// releasem undoes a prior acquirem on mp. If this drops m.locks to zero
// and a preemption was requested while the M was held, re-arm the
// preemption check.
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// Restore the preemption request we suppressed while locks > 0
		// by poisoning the stack guard.
		gp.stackguard0 = stackPreempt
	}
}
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
// reflect_typelinks returns, for each active module, a pointer to the
// module's types section and the offsets (relative to that section) of
// its typelink entries. NOTE(review): presumably linknamed from the
// reflect package; the directive is not visible in this excerpt.
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()

	// typesToOffsets converts a module's typelink pointers into int32
	// offsets from the module's types-section base.
	typesToOffsets := func(md *moduledata) []int32 {
		types := moduleTypelinks(md)
		ret := make([]int32, 0, len(types))
		for _, typ := range types {
			ret = append(ret, int32(uintptr(unsafe.Pointer(typ))-md.types))
		}
		return ret
	}

	// The first module always exists; additional modules (e.g. plugins)
	// are appended after it.
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{typesToOffsets(modules[0])}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, typesToOffsets(md))
	}
	return sections, ret
}
655
656
657
658
659
660
661
662
663 func reflect_compiledTypelinks() ([]*abi.Type, [][]*abi.Type) {
664 modules := activeModules()
665 firstTypes := moduleTypelinks(modules[0])
666 var rest [][]*abi.Type
667 for _, md := range modules[1:] {
668 rest = append(rest, moduleTypelinks(md))
669 }
670 return firstTypes, rest
671 }
672
673
674
675
676
677
678
679
680
681
682
683
// reflect_resolveNameOff resolves a name offset (relative to the module
// containing ptrInModule) to a pointer to the name's bytes.
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
// reflect_resolveTypeOff resolves a type offset, interpreted relative to
// the module containing rtype, to a type descriptor pointer.
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
705
706
707
708
709
710
711
712
713
714
715
716
// reflect_resolveTextOff resolves a text (function) offset, interpreted
// relative to the module containing rtype, to a code pointer.
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
720
721
722
723
// reflectlite_resolveNameOff is the internal/reflectlite counterpart of
// reflect_resolveNameOff; see that function.
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
727
728
729
730
// reflectlite_resolveTypeOff is the internal/reflectlite counterpart of
// reflect_resolveTypeOff; see that function.
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
734
735
736
737
// reflect_addReflectOff registers ptr in the reflection-offset lookup
// tables and returns its id. Ids are negative, allocated downward from
// -1 (distinguishing them from real section offsets); registering the
// same pointer again returns the existing id. The tables are guarded by
// reflectOffsLock/reflectOffsUnlock.
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// Lazily create the forward (id->ptr) and inverse (ptr->id)
		// maps on first use.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // ids grow downward: -1, -2, -3, ...
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
755
756
757
758
759
760
// reflect_adjustAIXGCDataForRuntime rebases addr by the difference
// between the AIX static data base and the first module's data segment —
// NOTE(review): AIX-specific relocation adjustment; confirm semantics
// against the AIX linker/loader documentation.
func reflect_adjustAIXGCDataForRuntime(addr *byte) *byte {
	return (*byte)(add(unsafe.Pointer(addr), aixStaticDataBase-firstmoduledata.data))
}
764
765
// fips_getIndicator returns the current goroutine's FIPS service
// indicator (stored on the g).
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
769
770
// fips_setIndicator sets the current goroutine's FIPS service indicator
// (stored on the g).
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
774
View as plain text