// Go runtime source: src/runtime/iface.go
// (Interface conversion, itab table, and type-assertion cache support.)
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/goarch"
10 "internal/runtime/atomic"
11 "internal/runtime/sys"
12 "unsafe"
13 )
14
// itabInitSize is the number of entries in the initial, statically
// allocated itab table.
const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itabTable
	itabTable     = &itabTableInit                    // pointer to current table; replaced atomically on growth
	itabTableInit = itabTableType{size: itabInitSize} // starter table
)

// itabTableType is an open-addressed hash table of itabs.
// Note: change the size formula in the mallocgc call in itabAdd
// if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large; grown tables are allocated with extra trailing space
}
29
30 func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
31
32 return uintptr(inter.Type.Hash ^ typ.Hash)
33 }
34
35
36
37
38
39
40
41
42
43
// getitab returns the itab for the pair (inter, typ), creating and
// caching it in the global itabTable on first use.
// If typ does not implement inter: returns nil when canfail is true,
// panics with a TypeAssertionError otherwise.
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// Easy case: a type with no uncommon section has no methods,
	// so it cannot implement a non-empty interface.
	if typ.TFlag&abi.TFlagUncommon == 0 {
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that published the table pointer (atomicstorep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found. Grab the lock and try again (another thread may have
	// added it between our lock-free lookup and acquiring the lock).
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	// Sized for Fun[0] plus one extra word per additional interface
	// method; persistentalloc memory is never freed.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.Inter = inter
	m.Type = typ
	// NOTE(review): m.Hash is not the hash used by the runtime itab
	// hash table (that is itabHashFunc); presumably it is only
	// meaningful for statically generated itabs — confirm before use.
	m.Hash = 0
	itabInit(m, true)
	itabAdd(m)
	unlock(&itabLock)
finish:
	if m.Fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// Fun[0] == 0 means the cached itab records a failed conversion
	// (typ does not implement inter). The cached entry doesn't record
	// which method was missing, so run itabInit again (firstTime=false,
	// which writes nothing) just to recover the missing method name
	// for the error message.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)})
}
104
105
106
// find looks up the given interface/type pair in table t.
// Returns nil if the pair isn't present. Safe to call without holding
// itabLock: entries are only ever written once, with an atomic store.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Quadratic probing: h(i) = h0 + i*(i+1)/2 mod 2^k, implemented
	// incrementally by adding i each iteration. On a power-of-two
	// table this sequence visits every slot, so an empty slot is a
	// definitive miss.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use an atomic read so that if we observe m != nil we also
		// observe the initialized fields of *m (paired with the
		// atomic.StorepNoWB in add).
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			return nil
		}
		if m.Inter == inter && m.Type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
129
130
131
// itabAdd adds the given itab to the itab hash table, growing the
// table first if it is over 75% full. itabLock must be held.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing an itab
	// (e.g. via the table growth below); detect the deadlock early.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table: header (size+count = 2 words) plus twice
		// as many entry pointers. Allocated as pointer-free (nil type)
		// because the itabs pointed to are persistentalloc'd, not heap
		// memory, so the GC need not scan this table.
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries. While copying, concurrent lock-free
		// readers may miss an itab in the new table; they will then
		// take itabLock and retry, serializing after this copy.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Atomic write pairs with the
		// atomic.Loadp in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}
166
167
168
// add inserts the given itab into table t. itabLock must be held.
func (t *itabTableType) add(m *itab) {
	// Same quadratic probe sequence as find; insert in the first
	// empty slot.
	mask := t.size - 1
	h := itabHashFunc(m.Inter, m.Type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// Already present: the same itab pointer can be offered
			// twice (e.g. the same itab reachable from more than one
			// module's itab list). Nothing to do.
			return
		}
		if m2 == nil {
			// Atomic store so a lock-free reader that sees m also
			// sees m's initialized fields (pairs with atomic.Loadp in
			// find). NoWB (no write barrier) is safe only because m
			// is not heap memory.
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}
197
198
199
200
201
202
203
// itabInit fills in the method table m.Fun for the pair m.Inter/m.Type.
// If the type does not implement the interface it returns the name of
// the first missing interface method (leaving m.Fun[0] untouched as the
// failure marker); on success it returns "".
// When firstTime is false nothing is written to m.Fun — the call is
// only used to recover the missing method name for error messages.
func itabInit(m *itab, firstTime bool) string {
	inter := m.Inter
	typ := m.Type
	x := typ.Uncommon()

	// Both the interface's method list and the concrete type's method
	// list are sorted by name, and interface method names are unique,
	// so the two lists can be merged in lock step: j never resets, so
	// the double loop is O(ni+nt), not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
	xmhdr := unsafe.Slice((*abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff))), nt)
	j := 0
	// View m.Fun as a full-length [ni]unsafe.Pointer slice (the itab
	// was allocated with room for ni entries; see getitab).
	methods := unsafe.Slice((*unsafe.Pointer)(unsafe.Pointer(&m.Fun[0])), ni)
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			// Unexported method with no explicit package: it belongs
			// to the interface's own package.
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				// Match requires the method be exported, or defined
				// in the same package as the interface method.
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						// Defer writing Fun[0]: it doubles as the
						// success flag, so it must be set last.
						fun0 = ifn
					} else if firstTime {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// Didn't find a matching concrete method: conversion fails.
		// Fun[0] is left as-is so the itab records the failure.
		return iname
	}
	if firstTime {
		m.Fun[0] = uintptr(fun0)
	}
	return ""
}
258
// itabsinit runs during startup: it registers the itabLock with the
// lock-rank checker and seeds the itab table with the compiler-generated
// itabs of every active module.
func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		addModuleItabs(md)
	}
	unlock(&itabLock)
}
267
268
269
// addModuleItabs walks the itab region of md's type data, adding each
// statically generated itab to the global table. itabLock must be held
// (itabAdd requires it).
func addModuleItabs(md *moduledata) {
	p := md.types + md.itaboffset
	end := p + md.itabsize
	// Itabs are variable-sized (Fun array length depends on the
	// interface), so advance by each itab's own Size().
	for p < end {
		itab := (*itab)(unsafe.Pointer(p))
		itabAdd(itab)
		p += uintptr(itab.Size())
	}
}
279
280
281
282
283
// panicdottypeE is called when an e.(T) conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
func panicdottypeE(have, want, iface *_type) {
	panic(&TypeAssertionError{iface, have, want, ""})
}
287
288
289
290 func panicdottypeI(have *itab, want, iface *_type) {
291 var t *_type
292 if have != nil {
293 t = have.Type
294 }
295 panicdottypeE(t, want, iface)
296 }
297
298
299
// panicnildottype is called when doing an i.(T) conversion and the
// interface i is nil.
// want = the static type we're trying to convert to.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: could also pass the static type we're converting from;
	// it might produce a better error message.
}
306
307
308
309
310
311
312
// The specialized convTx routines need a type descriptor to pass to
// mallocgc. The exact type isn't important, only its size, alignment
// and pointer-ness — but distinct named types make allocation sites
// easier to attribute when debugging, so each convTx gets its own.
type (
	uint16InterfacePtr uint16
	uint32InterfacePtr uint32
	uint64InterfacePtr uint64
	stringInterfacePtr string
	sliceInterfacePtr  []byte
)

var (
	// Eface values exist only so we can extract the _type descriptors
	// below at package init.
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	// Type descriptors handed to mallocgc by the convTx routines.
	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)
334
335
336
337
338
339
340
341
342
343
// convT converts the value of type t at *v to a heap copy whose address
// can be used as the data word of an interface value.
// The race/msan/asan hooks report the read of *v on behalf of our
// caller; they must run before the copy.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, true)
	// typedmemmove (not memmove) so write barriers fire for any
	// pointers inside the value.
	typedmemmove(t, x, v)
	return x
}
// convTnoptr is like convT but is only used for pointer-free types,
// so it can allocate non-scannable memory and use a plain memmove
// (no write barriers needed).
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	// needzero=false is safe: the value is fully overwritten below.
	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}
374
375 func convT16(val uint16) (x unsafe.Pointer) {
376 if val < uint16(len(staticuint64s)) {
377 x = unsafe.Pointer(&staticuint64s[val])
378 if goarch.BigEndian {
379 x = add(x, 6)
380 }
381 } else {
382 x = mallocgc(2, uint16Type, false)
383 *(*uint16)(x) = val
384 }
385 return
386 }
387
388 func convT32(val uint32) (x unsafe.Pointer) {
389 if val < uint32(len(staticuint64s)) {
390 x = unsafe.Pointer(&staticuint64s[val])
391 if goarch.BigEndian {
392 x = add(x, 4)
393 }
394 } else {
395 x = mallocgc(4, uint32Type, false)
396 *(*uint32)(x) = val
397 }
398 return
399 }
400
401
402
403
404
405
406
407
408
409
410 func convT64(val uint64) (x unsafe.Pointer) {
411 if val < uint64(len(staticuint64s)) {
412 x = unsafe.Pointer(&staticuint64s[val])
413 } else {
414 x = mallocgc(8, uint64Type, false)
415 *(*uint64)(x) = val
416 }
417 return
418 }
419
420
421
422
423
424
425
426
427
428
429 func convTstring(val string) (x unsafe.Pointer) {
430 if val == "" {
431 x = unsafe.Pointer(&zeroVal[0])
432 } else {
433 x = mallocgc(unsafe.Sizeof(val), stringType, true)
434 *(*string)(x) = val
435 }
436 return
437 }
438
439
440
441
442
443
444
445
446
447
// convTslice returns a pointer suitable for the data word of an
// interface holding the byte slice val. A nil slice shares the
// read-only zeroVal cell instead of allocating.
func convTslice(val []byte) (x unsafe.Pointer) {
	// Deliberately checks the slice's array pointer directly rather
	// than comparing val to nil — do not "simplify" this; the two
	// checks are not interchangeable here.
	if (*slice)(unsafe.Pointer(&val)).array == nil {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		// Scannable allocation: a slice header contains a pointer.
		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
		*(*[]byte)(x) = val
	}
	return
}
458
459 func assertE2I(inter *interfacetype, t *_type) *itab {
460 if t == nil {
461
462 panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
463 }
464 return getitab(inter, t, false)
465 }
466
467 func assertE2I2(inter *interfacetype, t *_type) *itab {
468 if t == nil {
469 return nil
470 }
471 return getitab(inter, t, true)
472 }
473
474
475
476
// typeAssert builds an itab for concrete type t and interface type
// s.Inter. If the conversion is not possible it panics when s.CanFail
// is false and returns nil when s.CanFail is true. On supported
// architectures it also probabilistically updates the per-assert-site
// cache so generated code can skip the runtime call next time.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return tab
	}

	// Only bother updating the cache ~1 in 1024 times, so sites that
	// run only a few times don't pay for cache construction.
	if cheaprand()&1023 != 0 {
		return tab
	}

	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	// As the cache gets larger, update it even less often, amortizing
	// the (linear in cache size) cost of rebuilding it.
	if cheaprand()&uint32(oldC.Mask) != 0 {
		return tab
	}

	// Build a new cache containing the old entries plus this result.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Publish with compare-and-swap: if several threads race to
	// update, at least one update sticks; losers' work is discarded.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}
516
// buildTypeAssertCache returns a new cache containing all the live
// entries of oldC plus one new entry mapping typ to tab.
func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count entries needed: the new one plus every occupied old slot.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Size the table at <= 50% load, rounded up to a power of two,
	// so linear-probe lookups are short and always terminate on an
	// empty slot.
	newN := n * 2
	newN = 1 << sys.Len64(uint64(newN-1))

	// Allocate the new table. Entries hold uintptrs (not pointers),
	// hence the nil type / pointer-free allocation.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// addEntry inserts one typ->tab mapping by linear probing.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	// Re-insert the old entries, then the new one.
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
561
562
563
// emptyTypeAssertCache is the initial cache: a single slot with a zero
// Typ, so any lookup misses immediately.
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}
565
566
567
568
569
570
// interfaceSwitch matches dynamic type t against the interface cases
// of switch site s. It returns the index of the first matching case
// and an itab for <t, that case>; with no match it returns
// (len(cases), nil). On supported architectures it also
// probabilistically updates the per-switch-site cache.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if no case matches.
	case_ := len(cases)
	var tab *itab

	// Try each case in order; first implementing interface wins.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return case_, tab
	}

	// Only bother updating the cache ~1 in 1024 times, so sites that
	// run only a few times don't pay for cache construction.
	if cheaprand()&1023 != 0 {
		return case_, tab
	}

	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	// As the cache gets larger, update it even less often, amortizing
	// the (linear in cache size) cost of rebuilding it.
	if cheaprand()&uint32(oldC.Mask) != 0 {
		return case_, tab
	}

	// Build a new cache containing the old entries plus this result.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Publish with compare-and-swap: if several threads race to
	// update, at least one update sticks; losers' work is discarded.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}
619
620
621
622
// buildInterfaceSwitchCache returns a new cache containing all the
// live entries of oldC plus one new entry mapping typ to
// (case_, tab). Mirrors buildTypeAssertCache with an extra Case field.
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count entries needed: the new one plus every occupied old slot.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Size the table at <= 50% load, rounded up to a power of two,
	// so linear-probe lookups are short and always terminate on an
	// empty slot.
	newN := n * 2
	newN = 1 << sys.Len64(uint64(newN-1))

	// Allocate the new table. Entries hold uintptrs (not pointers),
	// hence the nil type / pointer-free allocation.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// addEntry inserts one typ->(case,tab) mapping by linear probing.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	// Re-insert the old entries, then the new one.
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}
668
669
670
// emptyInterfaceSwitchCache is the initial cache: a single slot with a
// zero Typ, so any lookup misses immediately.
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
672
673
674
675
676
677
678
679
680
681
682
// reflect_ifaceE2I converts empty-interface value e to the non-empty
// interface type inter, storing the result in *dst. Panics (via
// assertE2I) if e's dynamic type doesn't implement inter.
// NOTE(review): the reflect_ prefix suggests this is a go:linkname
// target for package reflect; the directive is not visible in this
// view — confirm before renaming or moving.
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
686
687
// reflectlite_ifaceE2I is the internal/reflectlite counterpart of
// reflect_ifaceE2I.
// NOTE(review): likely a go:linkname target; directive not visible in
// this view — confirm before renaming or moving.
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
691
// iterate_itabs calls fn on every itab currently in the itab table.
// It reads itabTable and its entries non-atomically, so the caller
// must prevent concurrent mutation (hold itabLock, or run while the
// world is stopped).
func iterate_itabs(fn func(*itab)) {
	t := itabTable
	for i := uintptr(0); i < t.size; i++ {
		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		if m != nil {
			fn(m)
		}
	}
}
703
704
705
706
// staticuint64s lets convT16/convT32/convT64 avoid allocating for
// small values: convTx returns a pointer into this array, so entry i
// must hold the value i. NOTE(review): no initializer appears here —
// presumably it is populated elsewhere (e.g. assembly/linker data);
// confirm before relying on its contents.
var staticuint64s [256]uint64
708
709
710
711
712
// getStaticuint64s returns a pointer to the staticuint64s array.
// NOTE(review): presumably exposed to another package via go:linkname;
// the directive is not visible in this view — confirm before removing.
func getStaticuint64s() *[256]uint64 {
	return &staticuint64s
}
716
717
718
719
// unreachableMethod aborts the program; per its message it stands in
// for method entries that should never be callable (reaching it
// indicates a linker bug).
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}
723
// (end of file)