// Source file: src/runtime/iface.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"internal/runtime/atomic"
    11  	"internal/runtime/sys"
    12  	"unsafe"
    13  )
    14  
// itabInitSize is the entry count of the statically allocated starter
// table. It must be a power of two, matching the invariant on
// itabTableType.size.
const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itab table
	itabTable     = &itabTableInit                    // pointer to current table; read with atomic.Loadp off the lock
	itabTableInit = itabTableType{size: itabInitSize} // starter table, avoids an allocation at startup
)
    22  
// itabTableType is an open-addressed hash table mapping
// (interface type, concrete type) pairs to itabs.
//
// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large; grown tables are allocated with a bigger trailing array
}
    29  
    30  func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
    31  	// compiler has provided some good hash codes for us.
    32  	return uintptr(inter.Type.Hash ^ typ.Hash)
    33  }
    34  
// getitab returns the itab for the pair (inter, typ), creating and
// caching a new one on first use. If typ does not implement inter,
// getitab returns nil when canfail is true and panics with a
// *TypeAssertionError otherwise. inter must be a non-empty interface.
//
// getitab should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case: a type with no uncommon data has no methods at all,
	// so it cannot possibly implement a non-empty interface.
	if typ.TFlag&abi.TFlagUncommon == 0 {
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that updates the itabTable field (with atomic.Storep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found.  Grab the lock and try again.
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	// The itab's Fun array is variable-length (Methods-1 extra slots
	// beyond the one in the struct), hence the size formula.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.Inter = inter
	m.Type = typ
	// The hash is used in type switches. However, compiler statically generates itab's
	// for all interface/type pairs used in switches (which are added to itabTable
	// in itabsinit). The dynamically-generated itab's never participate in type switches,
	// and thus the hash is irrelevant.
	// Note: m.Hash is _not_ the hash used for the runtime itabTable hash table.
	m.Hash = 0
	itabInit(m, true)
	itabAdd(m)
	unlock(&itabLock)
finish:
	// Fun[0] != 0 means the type implements the interface; it is left
	// 0 (by itabInit) as the cached negative result otherwise.
	if m.Fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// this can only happen if the conversion
	// was already done once using the , ok form
	// and we have a cached negative result.
	// The cached result doesn't record which
	// interface function was missing, so initialize
	// the itab again to get the missing function name.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)})
}
   104  
// find finds the given interface/type pair in t.
// Returns nil if the given interface/type pair isn't present.
// Safe to call without itabLock; see the atomic load below.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Implemented using quadratic probing.
	// Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
	// We're guaranteed to hit all table entries using this probe sequence.
	// Termination is guaranteed because the table is never more than
	// 75% full (see itabAdd), so an empty slot is always reachable.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use atomic read here so if we see m != nil, we also see
		// the initializations of the fields of m.
		// m := *p
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			return nil
		}
		if m.Inter == inter && m.Type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
   129  
// itabAdd adds the given itab to the itab hash table.
// itabLock must be held.
// Grows (doubles) the table first if it is at or above 75% load.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing is set,
	// typically because this is called while panicking.
	// Crash reliably, rather than only when we need to grow
	// the hash table.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table.
		// t2 = new(itabTableType) + some additional entries
		// Size formula: 2 words for the size/count header plus
		// 2*t.size entry pointers (the doubled table).
		// We lie and tell malloc we want pointer-free memory because
		// all the pointed-to values are not in the heap.
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries.
		// Note: while copying, other threads may look for an itab and
		// fail to find it. That's ok, they will then try to get the itab lock
		// and as a consequence wait until this copying is complete.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Use an atomic write: see comment in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}
   166  
// add adds the given itab to itab table t.
// itabLock must be held.
// Inserting an itab that is already present is a no-op.
func (t *itabTableType) add(m *itab) {
	// See comment in find about the probe sequence.
	// Insert new itab in the first empty spot in the probe sequence.
	mask := t.size - 1
	h := itabHashFunc(m.Inter, m.Type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// A given itab may be used in more than one module
			// and thanks to the way global symbol resolution works, the
			// pointed-to itab may already have been inserted into the
			// global 'hash'.
			return
		}
		if m2 == nil {
			// Use atomic write here so if a reader sees m, it also
			// sees the correctly initialized fields of m.
			// NoWB is ok because m is not in heap memory.
			// *p = m
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}
   197  
// itabInit fills in the m.Fun array with all the code pointers for
// the m.Inter/m.Type pair. If the type does not implement the interface,
// it sets m.Fun[0] to 0 and returns the name of an interface function that is missing.
// If !firstTime, itabInit will not write anything to m.Fun (see issue 65962).
// It is ok to call this multiple times on the same m, even concurrently
// (although it will only be called once with firstTime==true).
func itabInit(m *itab, firstTime bool) string {
	inter := m.Inter
	typ := m.Type
	x := typ.Uncommon()

	// both inter and typ have method sorted by name,
	// and interface names are unique,
	// so can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
	xmhdr := unsafe.Slice((*abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff))), nt)
	j := 0
	// methods aliases m.Fun so the variable-length tail can be indexed.
	methods := unsafe.Slice((*unsafe.Pointer)(unsafe.Pointer(&m.Fun[0])), ni)
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			// Unexported interface methods carry no package of their
			// own; they belong to the interface's package.
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				// Unexported methods only match within the same package.
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						// Defer writing m.Fun[0] until the end: a nonzero
						// Fun[0] is the "implements" flag readers check.
						fun0 = ifn // we'll set m.Fun[0] at the end
					} else if firstTime {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// didn't find method
		// Leaves m.Fun[0] set to 0.
		return iname
	}
	if firstTime {
		m.Fun[0] = uintptr(fun0)
	}
	return ""
}
   258  
// itabsinit seeds the itab hash table with the compiler-generated
// itabs from every active module. Called once during startup.
func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		addModuleItabs(md)
	}
	unlock(&itabLock)
}
   267  
   268  // addModuleItabs adds the pre-compiled itabs from md to the itab hash table.
   269  // This is an optimization to let us skip creating itabs we already have.
   270  func addModuleItabs(md *moduledata) {
   271  	p := md.types + md.itaboffset
   272  	end := p + md.itabsize
   273  	for p < end {
   274  		itab := (*itab)(unsafe.Pointer(p))
   275  		itabAdd(itab)
   276  		p += uintptr(itab.Size())
   277  	}
   278  }
   279  
// panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
// It always panics with a *TypeAssertionError.
func panicdottypeE(have, want, iface *_type) {
	panic(&TypeAssertionError{iface, have, want, ""})
}
   287  
   288  // panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
   289  // Same args as panicdottypeE, but "have" is the dynamic itab we have.
   290  func panicdottypeI(have *itab, want, iface *_type) {
   291  	var t *_type
   292  	if have != nil {
   293  		t = have.Type
   294  	}
   295  	panicdottypeE(t, want, iface)
   296  }
   297  
// panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
// want = the static type we're trying to convert to.
// It always panics with a *TypeAssertionError.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: Add the static type we're converting from as well.
	// It might generate a better error message.
	// Just to match other nil conversion errors, we don't for now.
}
   306  
// The specialized convTx routines need a type descriptor to use when calling mallocgc.
// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness.
// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from,
// so we use named types here.
// We then construct interface values of these types,
// and then extract the type word to use as needed.
type (
	uint16InterfacePtr uint16
	uint32InterfacePtr uint32
	uint64InterfacePtr uint64
	stringInterfacePtr string
	sliceInterfacePtr  []byte
)
   320  
var (
	// Interface values whose type words point at the named types above.
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	// Type descriptors extracted from the efaces above, passed to
	// mallocgc by the convTx routines.
	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)
   334  
   335  // The conv and assert functions below do very similar things.
   336  // The convXXX functions are guaranteed by the compiler to succeed.
   337  // The assertXXX functions may fail (either panicking or returning false,
   338  // depending on whether they are 1-result or 2-result).
   339  // The convXXX functions succeed on a nil input, whereas the assertXXX
   340  // functions fail on a nil input.
   341  
// convT converts a value of type t, which is pointed to by v, to a pointer that can
// be used as the second word of an interface value.
// The value is copied to a fresh heap allocation; t may contain pointers.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// Sanitizer hooks read v before the allocation so a bad v is
	// reported against the caller's access.
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, true)
	// typedmemmove preserves write barriers for pointer-containing t.
	typedmemmove(t, x, v)
	return x
}
// convTnoptr is like convT but for types known to contain no pointers:
// the allocation is pointer-free and a plain memmove suffices.
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// TODO: maybe take size instead of type?
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}

	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}
   374  
   375  func convT16(val uint16) (x unsafe.Pointer) {
   376  	if val < uint16(len(staticuint64s)) {
   377  		x = unsafe.Pointer(&staticuint64s[val])
   378  		if goarch.BigEndian {
   379  			x = add(x, 6)
   380  		}
   381  	} else {
   382  		x = mallocgc(2, uint16Type, false)
   383  		*(*uint16)(x) = val
   384  	}
   385  	return
   386  }
   387  
   388  func convT32(val uint32) (x unsafe.Pointer) {
   389  	if val < uint32(len(staticuint64s)) {
   390  		x = unsafe.Pointer(&staticuint64s[val])
   391  		if goarch.BigEndian {
   392  			x = add(x, 4)
   393  		}
   394  	} else {
   395  		x = mallocgc(4, uint32Type, false)
   396  		*(*uint32)(x) = val
   397  	}
   398  	return
   399  }
   400  
   401  // convT64 should be an internal detail,
   402  // but widely used packages access it using linkname.
   403  // Notable members of the hall of shame include:
   404  //   - github.com/bytedance/sonic
   405  //
   406  // Do not remove or change the type signature.
   407  // See go.dev/issue/67401.
   408  //
   409  //go:linkname convT64
   410  func convT64(val uint64) (x unsafe.Pointer) {
   411  	if val < uint64(len(staticuint64s)) {
   412  		x = unsafe.Pointer(&staticuint64s[val])
   413  	} else {
   414  		x = mallocgc(8, uint64Type, false)
   415  		*(*uint64)(x) = val
   416  	}
   417  	return
   418  }
   419  
   420  // convTstring should be an internal detail,
   421  // but widely used packages access it using linkname.
   422  // Notable members of the hall of shame include:
   423  //   - github.com/bytedance/sonic
   424  //
   425  // Do not remove or change the type signature.
   426  // See go.dev/issue/67401.
   427  //
   428  //go:linkname convTstring
   429  func convTstring(val string) (x unsafe.Pointer) {
   430  	if val == "" {
   431  		x = unsafe.Pointer(&zeroVal[0])
   432  	} else {
   433  		x = mallocgc(unsafe.Sizeof(val), stringType, true)
   434  		*(*string)(x) = val
   435  	}
   436  	return
   437  }
   438  
   439  // convTslice should be an internal detail,
   440  // but widely used packages access it using linkname.
   441  // Notable members of the hall of shame include:
   442  //   - github.com/bytedance/sonic
   443  //
   444  // Do not remove or change the type signature.
   445  // See go.dev/issue/67401.
   446  //
   447  //go:linkname convTslice
   448  func convTslice(val []byte) (x unsafe.Pointer) {
   449  	// Note: this must work for any element type, not just byte.
   450  	if (*slice)(unsafe.Pointer(&val)).array == nil {
   451  		x = unsafe.Pointer(&zeroVal[0])
   452  	} else {
   453  		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
   454  		*(*[]byte)(x) = val
   455  	}
   456  	return
   457  }
   458  
   459  func assertE2I(inter *interfacetype, t *_type) *itab {
   460  	if t == nil {
   461  		// explicit conversions require non-nil interface value.
   462  		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
   463  	}
   464  	return getitab(inter, t, false)
   465  }
   466  
   467  func assertE2I2(inter *interfacetype, t *_type) *itab {
   468  	if t == nil {
   469  		return nil
   470  	}
   471  	return getitab(inter, t, true)
   472  }
   473  
// typeAssert builds an itab for the concrete type t and the
// interface type s.Inter. If the conversion is not possible it
// panics if s.CanFail is false and returns nil if s.CanFail is true.
// On supported architectures it also probabilistically updates the
// per-assert-site cache in s so generated code can skip this call.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		// Nil source interface value: fails unless the ", ok" form.
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		return tab
	}
	// Load the current cache.
	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache.
		return tab
	}

	// Make a new cache.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}
   516  
// buildTypeAssertCache returns a new type-assert cache containing all
// the entries of oldC plus a new entry mapping typ to tab. The table
// is open-addressed (linear probing) and kept at most 50% full so
// lookups always terminate at an empty slot.
func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	// Entries[0] is part of the struct; the remaining newN-1 entries
	// form a variable-length tail, hence the size formula.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
   561  
// Empty type assert cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.) Used as the initial
// value of abi.TypeAssert.Cache.
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}
   565  
// interfaceSwitch compares t against the list of cases in s.
// If t matches case i, interfaceSwitch returns the case index i and
// an itab for the pair <t, s.Cases[i]>.
// If there is no match, return N,nil, where N is the number
// of cases.
// On supported architectures it also probabilistically updates the
// per-switch-site cache in s so generated code can skip this call.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if we don't find a match.
	case_ := len(cases)
	var tab *itab

	// Look through each case in order.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return case_, tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		// This ensures we don't waste memory on switches, or
		// switch arguments, that only happen a few times.
		return case_, tab
	}
	// Load the current cache.
	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache
		// (that cost is linear in oldc.Mask).
		return case_, tab
	}

	// Make a new cache.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}
   619  
// buildInterfaceSwitchCache constructs an interface switch cache
// containing all the entries from oldC plus the new entry
// (typ,case_,tab). The table is open-addressed (linear probing) and
// kept at most 50% full so lookups always terminate at an empty slot.
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	// Entries[0] is part of the struct; the remaining newN-1 entries
	// form a variable-length tail, hence the size formula.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}
   668  
// Empty interface switch cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.) Used as the initial
// value of abi.InterfaceSwitch.Cache.
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
   672  
// reflect_ifaceE2I converts the empty-interface value e to the
// interface type inter, storing the result in *dst. Panics (via
// assertE2I) if e is nil or its dynamic type does not implement inter.
//
// reflect_ifaceE2I is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
//
//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
   686  
// reflectlite_ifaceE2I is the internal/reflectlite analogue of
// reflect_ifaceE2I; see that function for semantics.
//
//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
   691  
   692  func iterate_itabs(fn func(*itab)) {
   693  	// Note: only runs during stop the world or with itabLock held,
   694  	// so no other locks/atomics needed.
   695  	t := itabTable
   696  	for i := uintptr(0); i < t.size; i++ {
   697  		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
   698  		if m != nil {
   699  			fn(m)
   700  		}
   701  	}
   702  }
   703  
// staticuint64s is used to avoid allocating in convTx for small integer values.
// staticuint64s[0] == 0, staticuint64s[1] == 1, and so forth.
// It is defined in assembler code so that it is read-only.
var staticuint64s [256]uint64
   708  
// getStaticuint64s is called by the reflect package to get a pointer
// to the read-only array.
//
//go:linkname getStaticuint64s
func getStaticuint64s() *[256]uint64 {
	return &staticuint64s
}
   716  
// The linker redirects a reference of a method that it determined
// unreachable to a reference to this function, so it will throw if
// ever called.
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}
   723  