// Source file src/runtime/type.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Runtime type representation.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/runtime/atomic"
    13  	"unsafe"
    14  )
    15  
// maps_typeString returns the string form of typ's name.
// It is linknamed into internal/runtime/maps, which cannot import
// the runtime package directly.
//
//go:linkname maps_typeString internal/runtime/maps.typeString
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}
    20  
// Offsets into a module's name/type/text data, as emitted by the
// linker. See internal/abi for the underlying definitions.
type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

// _type is the runtime's historical name for the ABI type descriptor.
type _type = abi.Type
    26  
// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}
    31  
    32  func (t rtype) string() string {
    33  	s := t.nameOff(t.Str).Name()
    34  	if t.TFlag&abi.TFlagExtraStar != 0 {
    35  		return s[1:]
    36  	}
    37  	return s
    38  }
    39  
// uncommon returns t's UncommonType (extra method/package metadata),
// or nil if t carries none.
func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}
    43  
    44  func (t rtype) name() string {
    45  	if t.TFlag&abi.TFlagNamed == 0 {
    46  		return ""
    47  	}
    48  	s := t.string()
    49  	i := len(s) - 1
    50  	sqBrackets := 0
    51  	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
    52  		switch s[i] {
    53  		case ']':
    54  			sqBrackets++
    55  		case '[':
    56  			sqBrackets--
    57  		}
    58  		i--
    59  	}
    60  	return s[i+1:]
    61  }
    62  
    63  // pkgpath returns the path of the package where t was defined, if
    64  // available. This is not the same as the reflect package's PkgPath
    65  // method, in that it returns the package path for struct and interface
    66  // types, not just named types.
    67  func (t rtype) pkgpath() string {
    68  	if u := t.uncommon(); u != nil {
    69  		return t.nameOff(u.PkgPath).Name()
    70  	}
    71  	switch t.Kind() {
    72  	case abi.Struct:
    73  		st := (*structtype)(unsafe.Pointer(t.Type))
    74  		return st.PkgPath.Name()
    75  	case abi.Interface:
    76  		it := (*interfacetype)(unsafe.Pointer(t.Type))
    77  		return it.PkgPath.Name()
    78  	}
    79  	return ""
    80  }
    81  
    82  // getGCMask returns the pointer/nonpointer bitmask for type t.
    83  //
    84  // nosplit because it is used during write barriers and must not be preempted.
    85  //
    86  //go:nosplit
    87  func getGCMask(t *_type) *byte {
    88  	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
    89  		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
    90  		return getGCMaskOnDemand(t)
    91  	}
    92  	return t.GCData
    93  }
    94  
// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
// Only its address is meaningful; its value is never read.
var inProgress byte
    98  
// getGCMaskOnDemand builds (on first use) and returns the pointer
// bitmask for a type marked TFlagGCMaskOnDemand. Concurrent callers
// coordinate through an atomically-updated slot, using &inProgress
// as a "being built" sentinel.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		// Relocate from the static link-time data base to the
		// actual runtime data segment base.
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				// Lost the race; reload and re-dispatch.
				continue
			}

			// Build gcmask for this type.
			// One bit per pointer-sized word of the pointer-containing
			// prefix, rounded up to whole pointer-sized bitmap words.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}
   145  
// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}
   152  
// Write to b cnt bits starting at bit 0 of data.
// Requires cnt>0.
// Bits are packed little-endian within each byte of the region; bytes
// outside the written span (including high bits of the final partial
// byte) are preserved.
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Merge the final partial byte: clear the low n bits of *p,
	// then OR in the buffered bits, keeping the existing high bits.
	*p &^= 1<<n - 1
	*p |= byte(buf)
}
   191  
   192  func (b bitCursor) offset(cnt uintptr) bitCursor {
   193  	return bitCursor{ptr: b.ptr, n: b.n + cnt}
   194  }
   195  
// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must have a pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		// Len >= 2, so each element is at most half the array's
		// size and recursion is safe. Stamp the element bitmap
		// once per element.
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				// Pointer-free fields contribute no set bits.
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}
   259  
// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we add pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32                    // next offset identifier to hand out
	m    map[int32]unsafe.Pointer // offset -> pinned pointer
	minv map[unsafe.Pointer]int32 // pointer -> offset (inverse of m)
}
   279  
// reflectOffsLock acquires the reflectOffs mutex. Under the race
// detector it also records an acquire on the lock address so that
// accesses to the maps inside the critical section are ordered.
func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}
   286  
// reflectOffsUnlock releases the reflectOffs mutex, recording the
// race-detector release before the actual unlock (the mirror image
// of reflectOffsLock).
func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}
   293  
   294  func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
   295  	if off == 0 {
   296  		return name{}
   297  	}
   298  	base := uintptr(ptrInModule)
   299  	for md := &firstmoduledata; md != nil; md = md.next {
   300  		if base >= md.types && base < md.etypes {
   301  			res := md.types + uintptr(off)
   302  			if res > md.etypes {
   303  				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
   304  				throw("runtime: name offset out of range")
   305  			}
   306  			return name{Bytes: (*byte)(unsafe.Pointer(res))}
   307  		}
   308  	}
   309  
   310  	// No module found. see if it is a run time name.
   311  	reflectOffsLock()
   312  	res, found := reflectOffs.m[int32(off)]
   313  	reflectOffsUnlock()
   314  	if !found {
   315  		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
   316  		for next := &firstmoduledata; next != nil; next = next.next {
   317  			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
   318  		}
   319  		throw("runtime: name offset base pointer out of range")
   320  	}
   321  	return name{Bytes: (*byte)(res)}
   322  }
   323  
// nameOff resolves a name offset relative to t's module.
func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}
   327  
   328  func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
   329  	if off == 0 || off == -1 {
   330  		// -1 is the sentinel value for unreachable code.
   331  		// See cmd/link/internal/ld/data.go:relocsym.
   332  		return nil
   333  	}
   334  	base := uintptr(ptrInModule)
   335  	var md *moduledata
   336  	for next := &firstmoduledata; next != nil; next = next.next {
   337  		if base >= next.types && base < next.etypes {
   338  			md = next
   339  			break
   340  		}
   341  	}
   342  	if md == nil {
   343  		reflectOffsLock()
   344  		res := reflectOffs.m[int32(off)]
   345  		reflectOffsUnlock()
   346  		if res == nil {
   347  			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
   348  			for next := &firstmoduledata; next != nil; next = next.next {
   349  				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
   350  			}
   351  			throw("runtime: type offset base pointer out of range")
   352  		}
   353  		return (*_type)(res)
   354  	}
   355  	res := md.types + uintptr(off)
   356  	resType := (*_type)(unsafe.Pointer(res))
   357  	if t := md.typemap[resType]; t != nil {
   358  		return t
   359  	}
   360  	if res > md.etypes {
   361  		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
   362  		throw("runtime: type offset out of range")
   363  	}
   364  	return resType
   365  }
   366  
// typeOff resolves a type offset relative to t's module.
func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}
   370  
// textOff resolves a method's text offset to a function address,
// relative to the module containing t. off == -1 (a method the linker
// proved unreachable) resolves to unreachableMethod.
func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	// Find the module whose type data contains t.
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module: a run-time registered offset.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}
   401  
// Aliases for the internal/abi descriptor types, preserving the
// runtime's historical lowercase names.
type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType
   419  
// pkgPath returns the package path encoded in n, or "" if n carries
// none. It walks the internal/abi name encoding: a flag byte, a
// varint-prefixed name, an optional varint-prefixed tag, then a
// 4-byte nameOff to the package path.
func pkgPath(n name) string {
	// Flag bit 1<<2 marks a name that carries a package path.
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	// Skip the name: varint length prefix plus the name bytes.
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	// Flag bit 1<<1 marks a trailing tag; skip it the same way.
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	// The offset may be unaligned, so copy it out byte by byte.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}
   435  
// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
//
// For each module after the first, a typelink type that is
// structurally equal to a type from an earlier module is mapped to
// the earlier (canonical) descriptor, so pointer equality on *_type
// keeps working under buildmode=shared.
func typelinksinit() {
	lockInit(&moduleToTypelinksLock, lockRankTypelinks)

	// A single module already has unique descriptors.
	if firstmoduledata.next == nil {
		return
	}

	modules := activeModules()
	prev := modules[0]
	prevTypelinks := moduleTypelinks(modules[0])
	// typehash indexes previously-seen canonical types by type hash.
	typehash := make(map[uint32][]*_type, len(prevTypelinks))
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prevTypelinks {
			t := tl
			if prev.typemap != nil {
				// Use prev's canonical descriptor for tl.
				t = prev.typemap[tl]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		mdTypelinks := moduleTypelinks(md)

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[*_type]*_type, len(mdTypelinks))
			// Record the map in pinnedTypemaps (declared elsewhere)
			// so it stays reachable.
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, t := range mdTypelinks {
				set := t
				for _, candidate := range typehash[t.Hash] {
					// Fresh seen-set per candidate pair; typesEqual
					// uses it to break recursion on cyclic types.
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						set = candidate
						break
					}
				}
				md.typemap[t] = set
			}
		}

		prev = md
		prevTypelinks = mdTypelinks
	}
}
   493  
// moduleToTypelinks maps from moduledata to typelinks.
// We build this lazily as needed, since most programs do not need it.
var (
	moduleToTypelinks     map[*moduledata][]*_type // guarded by moduleToTypelinksLock
	moduleToTypelinksLock mutex
)
   500  
// moduleTypelinks takes a moduledata and returns the type
// descriptors that the reflect package needs to know about.
// These are the typelinks. They are the types that the user
// can construct. This is used to ensure that we use a unique
// type descriptor for all types. The returned types are sorted
// by type string; the sorting is done by the linker.
// This slice is constructed as needed.
func moduleTypelinks(md *moduledata) []*_type {
	lock(&moduleToTypelinksLock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&moduleToTypelinksLock))
	}

	// Fast path: already built for this module.
	if typelinks, ok := moduleToTypelinks[md]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&moduleToTypelinksLock))
		}
		unlock(&moduleToTypelinksLock)
		return typelinks
	}

	// Allocate a very rough estimate of the number of types.
	ret := make([]*_type, 0, md.typedesclen/(2*unsafe.Sizeof(_type{})))

	td := md.types

	// We have to increment by the pointer size to match the
	// increment in cmd/link/internal/data.go createRelroSect
	// in allocateDataSections.
	//
	// The linker doesn't do that increment when runtime.types
	// has a non-zero size, but in that case the runtime.types
	// symbol itself pushes the other symbols forward.
	// So either way this increment is correct.
	td += goarch.PtrSize

	// Walk the packed descriptor region, collecting each type.
	etypedesc := md.types + md.typedesclen
	for td < etypedesc {
		td = alignUp(td, goarch.PtrSize)

		typ := (*_type)(unsafe.Pointer(td))
		ret = append(ret, typ)

		// Advance by this descriptor's full encoded size.
		td += uintptr(typ.DescriptorSize())
	}

	// Cache the result for subsequent callers.
	if moduleToTypelinks == nil {
		moduleToTypelinks = make(map[*moduledata][]*_type)
	}
	moduleToTypelinks[md] = ret

	if raceenabled {
		racerelease(unsafe.Pointer(&moduleToTypelinksLock))
	}
	unlock(&moduleToTypelinksLock)
	return ret
}
   558  
// _typePair is a pair of type descriptors, used as the key of the
// seen-set that typesEqual threads through its recursion.
type _typePair struct {
	t1 *_type
	t2 *_type
}
   563  
// toRType wraps an abi.Type so the runtime's rtype helper methods
// (string, name, pkgpath, ...) can be used on it.
func toRType(t *abi.Type) rtype {
	return rtype{t}
}
   567  
// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// mark these types as seen, and thus equivalent which prevents an infinite loop if
	// the two types are identical, but recursively defined and loaded from
	// different modules
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind()
	if kind != v.Kind() {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	// Named types must agree on package path as well as name.
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	// Primitive kinds carry no further structure to compare.
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		// Parameter and result types must match pairwise, in order.
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		// Method sets must match name, package, and signature, in order.
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		mt := (*abi.MapType)(unsafe.Pointer(t))
		mv := (*abi.MapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		// Fields must match pairwise: name, type, tag, offset,
		// and embeddedness all matter.
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
   723  
