Source file src/internal/runtime/maps/table.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goexperiment"
    10  	"internal/runtime/math"
    11  	"unsafe"
    12  )
    13  
    14  // Maximum size of a table before it is split at the directory level.
    15  //
    16  // TODO: Completely made up value. This should be tuned for performance vs grow
    17  // latency.
    18  // TODO: This should likely be based on byte size, as copying costs will
    19  // dominate grow latency for large objects.
    20  const maxTableCapacity = 1024
    21  
    22  // Ensure the max capacity fits in uint16, used for capacity and growthLeft
    23  // below.
    24  var _ = uint16(maxTableCapacity)
    25  
    26  // table is a Swiss table hash table structure.
    27  //
    28  // Each table is a complete hash table implementation.
    29  //
    30  // Map uses one or more tables to store entries. Extendible hashing (hash
    31  // prefix) is used to select the table to use for a specific key. Using
    32  // multiple tables enables incremental growth by growing only one table at a
    33  // time.
// table is a Swiss table hash table structure.
//
// Each table is a complete hash table implementation.
//
// Map uses one or more tables to store entries. Extendible hashing (hash
// prefix) is used to select the table to use for a specific key. Using
// multiple tables enables incremental growth by growing only one table at a
// time.
type table struct {
	// The number of filled slots (i.e. the number of elements in the table).
	used uint16

	// The total number of slots (always 2^N). Equal to
	// `(groups.lengthMask+1)*abi.MapGroupSlots`.
	capacity uint16

	// The number of slots we can still fill without needing to rehash.
	//
	// We rehash when used + tombstones > loadFactor*capacity, including
	// tombstones so the table doesn't overfill with tombstones. This field
	// counts down remaining empty slots before the next rehash.
	growthLeft uint16

	// The number of bits used by directory lookups above this table. Note
	// that this may be less than globalDepth, if the directory has grown
	// but this table has not yet been split.
	localDepth uint8

	// Index of this table in the Map directory. This is the index of the
	// _first_ location in the directory. The table may occur in multiple
	// sequential indices.
	//
	// index is -1 if the table is stale (no longer installed in the
	// directory).
	index int

	// groups is an array of slot groups. Each group holds abi.MapGroupSlots
	// key/elem slots and their control bytes. A table has a fixed size
	// groups array. The table is replaced (in rehash) when more space is
	// required.
	//
	// TODO(prattmic): keys and elements are interleaved to maximize
	// locality, but it comes at the expense of wasted space for some types
	// (consider uint8 key, uint64 element). Consider placing all keys
	// together in these cases to save space.
	groups groupsReference
}
    73  
    74  func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
    75  	if capacity < abi.MapGroupSlots {
    76  		capacity = abi.MapGroupSlots
    77  	}
    78  
    79  	t := &table{
    80  		index:      index,
    81  		localDepth: localDepth,
    82  	}
    83  
    84  	if capacity > maxTableCapacity {
    85  		panic("initial table capacity too large")
    86  	}
    87  
    88  	// N.B. group count must be a power of two for probeSeq to visit every
    89  	// group.
    90  	capacity, overflow := alignUpPow2(capacity)
    91  	if overflow {
    92  		panic("rounded-up capacity overflows uint64")
    93  	}
    94  
    95  	t.reset(typ, uint16(capacity))
    96  
    97  	return t
    98  }
    99  
   100  // reset resets the table with new, empty groups with the specified new total
   101  // capacity.
   102  func (t *table) reset(typ *abi.MapType, capacity uint16) {
   103  	groupCount := uint64(capacity) / abi.MapGroupSlots
   104  	t.groups = newGroups(typ, groupCount)
   105  	t.capacity = capacity
   106  	t.growthLeft = t.maxGrowthLeft()
   107  
   108  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
   109  		g := t.groups.group(typ, i)
   110  		g.ctrls().setEmpty()
   111  	}
   112  }
   113  
   114  // maxGrowthLeft is the number of inserts we can do before
   115  // resizing, starting from an empty table.
   116  func (t *table) maxGrowthLeft() uint16 {
   117  	if t.capacity == 0 {
   118  		// No real reason to support zero capacity table, since an
   119  		// empty Map simply won't have a table.
   120  		panic("table must have positive capacity")
   121  	} else if t.capacity <= abi.MapGroupSlots {
   122  		// If the map fits in a single group then we're able to fill all of
   123  		// the slots except 1 (an empty slot is needed to terminate find
   124  		// operations).
   125  		//
   126  		// TODO(go.dev/issue/54766): With a special case in probing for
   127  		// single-group tables, we could fill all slots.
   128  		return t.capacity - 1
   129  	} else {
   130  		if t.capacity > math.MaxUint16/maxAvgGroupLoad {
   131  			panic("overflow")
   132  		}
   133  		return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
   134  	}
   135  
   136  }
   137  
// Used returns the number of elements currently stored in the table.
func (t *table) Used() uint64 {
	return uint64(t.used)
}
   141  
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	// TODO(prattmic): We could avoid hashing in a variety of special
	// cases.
	//
	// - One entry maps could just directly compare the single entry
	//   without hashing.
	// - String keys could do quick checks of a few bytes before hashing.
	hash := typ.Hasher(key, m.seed)
	_, elem, ok := t.getWithKey(typ, hash, key)
	return elem, ok
}
   155  
// getWithKey performs a lookup of key, returning a pointer to the version of
// the key in the map in addition to the element.
//
// This is relevant when multiple different key values compare equal (e.g.,
// +0.0 and -0.0). When a grow occurs during iteration, iteration performs a
// lookup of keys from the old group in the new group in order to correctly
// expose updated elements. For NeedsKeyUpdate keys, iteration also must return
// the new key value, not the old key value.
// hash must be the hash of the key.
func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	// To find the location of a key in the table, we compute hash(key). From
	// h1(hash(key)) and the capacity, we construct a probeSeq that visits
	// every group of slots in some interesting order. See [probeSeq].
	//
	// We walk through these indices. At each index, we select the entire
	// group starting with that index and extract potential candidates:
	// occupied slots with a control byte equal to h2(hash(key)). The key
	// at candidate slot i is compared with key; if key == g.slot(i).key
	// we are done and return the slot; if there is an empty slot in the
	// group, we stop and return an error; otherwise we continue to the
	// next probe index. Tombstones (ctrlDeleted) effectively behave like
	// full slots that never match the value we're looking for.
	//
	// The h2 bits ensure when we compare a key we are likely to have
	// actually found the object. That is, the chance is low that keys
	// compare false. Thus, when we search for an object, we are unlikely
	// to call Equal many times. This likelihood can be analyzed as follows
	// (assuming that h2 is a random enough hash function).
	//
	// Let's assume that there are k "wrong" objects that must be examined
	// in a probe sequence. For example, when doing a find on an object
	// that is in the table, k is the number of objects between the start
	// of the probe sequence and the final found object (not including the
	// final found object). The expected number of objects with an h2 match
	// is then k/128. Measurements and analysis indicate that even at high
	// load factors, k is less than 32, meaning that the number of false
	// positive comparisons we must perform is less than 1/8 per find.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotKey, slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return nil, nil, false
		}
	}
}
   225  
// getWithoutKey performs a lookup of key, returning only a pointer to the
// element (not the stored key). It follows the same probe-sequence algorithm
// as getWithKey; see the commentary there.
//
// hash must be the hash of key.
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return nil, false
		}
	}
}
   259  
// PutSlot returns a pointer to the element slot where an inserted element
// should be written, and ok if it returned a valid slot.
//
// PutSlot returns ok false if the table was split and the Map needs to find
// the new table.
//
// hash must be the hash of key.
func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	// As we look for a match, keep track of the first deleted slot we
	// find, which we'll use to insert the new entry if necessary.
	var firstDeletedGroup groupReference
	var firstDeletedSlot uintptr

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		// Look for an existing slot containing this key.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				// Key already present: overwrite the stored key
				// if the type requires it (e.g. to observe new
				// string backing storage) and return the
				// existing element slot.
				if typ.NeedKeyUpdate() {
					typedmemmove(typ.Key, slotKey, key)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}

				t.checkInvariants(typ, m)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		// No existing slot for this key in this group. Is this the end
		// of the probe sequence?
		match = g.ctrls().matchEmptyOrDeleted()
		if match == 0 {
			continue // nothing but filled slots. Keep probing.
		}
		i := match.first()
		if g.ctrls().get(i) == ctrlDeleted {
			// There are some deleted slots. Remember
			// the first one, and keep probing.
			if firstDeletedGroup.data == nil {
				firstDeletedGroup = g
				firstDeletedSlot = i
			}
			continue
		}
		// We've found an empty slot, which means we've reached the end of
		// the probe sequence.

		// If we found a deleted slot along the way, we can
		// replace it without consuming growthLeft.
		if firstDeletedGroup.data != nil {
			g = firstDeletedGroup
			i = firstDeletedSlot
			t.growthLeft++ // will be decremented below to become a no-op.
		}

		// If we have no space left, first try to remove some tombstones.
		if t.growthLeft == 0 {
			t.pruneTombstones(typ, m)
		}

		// If there is room left to grow, just insert the new entry.
		if t.growthLeft > 0 {
			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Allocate the off-heap key storage and store
				// its pointer in the slot.
				kmem := newobject(typ.Key)
				*(*unsafe.Pointer)(slotKey) = kmem
				slotKey = kmem
			}
			typedmemmove(typ.Key, slotKey, key)

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Likewise for indirect elements.
				emem := newobject(typ.Elem)
				*(*unsafe.Pointer)(slotElem) = emem
				slotElem = emem
			}

			g.ctrls().set(i, ctrl(h2Hash))
			t.growthLeft--
			t.used++
			m.used++

			t.checkInvariants(typ, m)
			return slotElem, true
		}

		// No room even after pruning: rehash (grow or split) and tell
		// the caller to retry against the new table(s).
		t.rehash(typ, m)
		return nil, false
	}
}
   366  
// uncheckedPutSlot inserts an entry known not to be in the table.
// This is used for grow/split where we are making a new table from
// entries in an existing table.
//
// Decrements growthLeft and increments used.
//
// Requires that the entry does not exist in the table, and that the table has
// room for another element without rehashing.
//
// Requires that there are no deleted entries in the table.
//
// For indirect keys and/or elements, the key and elem pointers can be
// put directly into the map, they do not need to be copied. This
// requires the caller to ensure that the referenced memory never
// changes (by sourcing those pointers from another indirect key/elem
// map).
func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
	if t.growthLeft == 0 {
		panic("invariant failed: growthLeft is unexpectedly 0")
	}

	// Given key and its hash hash(key), to insert it, we construct a
	// probeSeq, and use it to find the first group with an unoccupied (empty
	// or deleted) slot. We place the key/value into the first such slot in
	// the group and mark it as full with key's H2.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchEmptyOrDeleted()
		if match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Store the caller's pointer directly; see the
				// doc comment for why no copy is needed.
				*(*unsafe.Pointer)(slotKey) = key
			} else {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				*(*unsafe.Pointer)(slotElem) = elem
			} else {
				typedmemmove(typ.Elem, slotElem, elem)
			}

			t.growthLeft--
			t.used++
			g.ctrls().set(i, ctrl(h2(hash)))
			return
		}
	}
}
   421  
// Delete removes key from the table, if present. It returns true if it put a
// tombstone in t.
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot address so we can clear the indirect
			// pointer itself below.
			origSlotKey := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}

			if typ.Key.Equal(key, slotKey) {
				t.used--
				m.used--

				if typ.IndirectKey() {
					// Clearing the pointer is sufficient.
					*(*unsafe.Pointer)(origSlotKey) = nil
				} else if typ.Key.Pointers() {
					// Only bother clearing the key if there
					// are pointers in it.
					typedmemclr(typ.Key, slotKey)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					// Clearing the pointer is sufficient.
					*(*unsafe.Pointer)(slotElem) = nil
				} else {
					// Unlike keys, always clear the elem (even if
					// it contains no pointers), as compound
					// assignment operations depend on cleared
					// deleted values. See
					// https://go.dev/issue/25936.
					typedmemclr(typ.Elem, slotElem)
				}

				// Only a full group can appear in the middle
				// of a probe sequence (a group with at least
				// one empty slot terminates probing). Once a
				// group becomes full, it stays full until
				// rehashing/resizing. So if the group isn't
				// full now, we can simply remove the element.
				// Otherwise, we create a tombstone to mark the
				// slot as deleted.
				var tombstone bool
				if g.ctrls().matchEmpty() != 0 {
					g.ctrls().set(i, ctrlEmpty)
					t.growthLeft++
				} else {
					g.ctrls().set(i, ctrlDeleted)
					tombstone = true
				}

				t.checkInvariants(typ, m)
				return tombstone
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return false
		}
	}
}
   496  
// pruneTombstones goes through the table and tries to remove
// tombstones that are no longer needed. Best effort.
// Note that it only removes tombstones, it does not move elements.
// Moving elements would do a better job but is infeasible due to
// iterator semantics.
//
// Pruning should only succeed if it can remove O(n) tombstones.
// It would be bad if we did O(n) work to find 1 tombstone to remove.
// Then the next insert would spend another O(n) work to find 1 more
// tombstone to remove, etc.
//
// We really need to remove O(n) tombstones so we can pay for the cost
// of finding them. If we can't, then we need to grow (which is also O(n),
// but guarantees O(n) subsequent inserts can happen in constant time).
func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
	if t.tombstones()*10 < t.capacity { // 10% of capacity
		// Not enough tombstones to be worth the effort.
		return
	}

	// Bit set marking all the groups whose tombstones are needed.
	var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32

	// Trace the probe sequence of every full entry.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		match := g.ctrls().matchFull()
		for match != 0 {
			j := match.first()
			match = match.removeFirst()
			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}
			if !typ.Key.Equal(key, key) {
				// Key not equal to itself. We never have to find these
				// keys on lookup (only on iteration), so we can break
				// their probe sequences at will.
				continue
			}
			// Walk probe sequence for this key.
			// Each tombstone group we need to walk past is marked required.
			hash := typ.Hasher(key, m.seed)
			for seq := makeProbeSeq(h1(hash), t.groups.lengthMask); ; seq = seq.next() {
				if seq.offset == i {
					break // reached group of element in probe sequence
				}
				g := t.groups.group(typ, seq.offset)
				m := g.ctrls().matchEmptyOrDeleted()
				if m != 0 { // must be deleted, not empty, as we haven't found our key yet
					// Mark this group's tombstone as required.
					needed[seq.offset/32] |= 1 << (seq.offset % 32)
				}
			}
		}
		if g.ctrls().matchEmpty() != 0 {
			// Also mark non-tombstone-containing groups, so we don't try
			// to remove tombstones from them below.
			needed[i/32] |= 1 << (i % 32)
		}
	}

	// First, see if we can remove enough tombstones to restore capacity.
	// This function is O(n), so only remove tombstones if we can remove
	// enough of them to justify the O(n) cost.
	cnt := 0
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted() // must be deleted
		cnt += m.count()
	}
	if cnt*10 < int(t.capacity) { // Can we restore 10% of capacity?
		return // don't bother removing tombstones. Caller will grow instead.
	}

	// Prune unneeded tombstones.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted() // must be deleted
		for m != 0 {
			k := m.first()
			m = m.removeFirst()
			g.ctrls().set(k, ctrlEmpty)
			t.growthLeft++
		}
		// TODO: maybe we could convert all slots at once
		// using some bitvector trickery.
	}
}
   592  
   593  // tombstones returns the number of deleted (tombstone) entries in the table. A
   594  // tombstone is a slot that has been deleted but is still considered occupied
   595  // so as not to violate the probing invariant.
   596  func (t *table) tombstones() uint16 {
   597  	return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
   598  }
   599  
// Clear deletes all entries from the table resulting in an empty table.
func (t *table) Clear(typ *abi.MapType) {
	mgl := t.maxGrowthLeft()
	if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
		return
	}
	// We only want to do the work of clearing slots
	// if they are full. But we also don't want to do too
	// much work to figure out whether a slot is full or not,
	// especially if clearing a slot is cheap.
	//  1) We decide group-by-group instead of slot-by-slot.
	//     If any slot in a group is full, we zero the whole group.
	//  2) If groups are unlikely to be empty, don't bother
	//     testing for it.
	//  3) If groups are 50%/50% likely to be empty, also don't
	//     bother testing, as it confuses the branch predictor. See #75097.
	//  4) But if a group is really large, do the test anyway, as
	//     clearing is expensive.
	fullTest := uint64(t.used)*4 <= t.groups.lengthMask // less than ~0.25 entries per group -> >3/4 empty groups
	if goexperiment.MapSplitGroup {
		if (typ.KeyStride + typ.ElemStride) > 32 {
			// For large slots, it is always worth doing the test first.
			fullTest = true
		}
	} else {
		if typ.KeyStride > 32 { // KeyStride == SlotSize in interleaved layout
			// For large slots, it is always worth doing the test first.
			fullTest = true
		}
	}
	if fullTest {
		// Zero only the groups that contain at least one full slot.
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			if g.ctrls().anyFull() {
				typedmemclr(typ.Group, g.data)
			}
			g.ctrls().setEmpty()
		}
	} else {
		// Zero every group unconditionally.
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			typedmemclr(typ.Group, g.data)
			g.ctrls().setEmpty()
		}
	}
	t.used = 0
	t.growthLeft = mgl
}
   648  
// Iter is the iteration state for ranging over a Map. The field layout of the
// first two fields is relied upon by the compiler.
type Iter struct {
	key  unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	typ  *abi.MapType
	m    *Map

	// Randomize iteration order by starting iteration at a random slot
	// offset. The offset into the directory uses a separate offset, as it
	// must adjust when the directory grows.
	entryOffset uint64
	dirOffset   uint64

	// Snapshot of Map.clearSeq at iteration initialization time. Used to
	// detect clear during iteration.
	clearSeq uint64

	// Value of Map.globalDepth during the last call to Next. Used to
	// detect directory grow during iteration.
	globalDepth uint8

	// dirIdx is the current directory index, prior to adjustment by
	// dirOffset.
	dirIdx int

	// tab is the table at dirIdx during the previous call to Next.
	tab *table

	// group is the group at entryIdx during the previous call to Next.
	group groupReference

	// entryIdx is the current entry index, prior to adjustment by entryOffset.
	// The lower 3 bits of the index are the slot index, and the upper bits
	// are the group index.
	entryIdx uint64
}
   684  
   685  // Init initializes Iter for iteration.
   686  func (it *Iter) Init(typ *abi.MapType, m *Map) {
   687  	it.typ = typ
   688  
   689  	if m == nil || m.used == 0 {
   690  		return
   691  	}
   692  
   693  	dirIdx := 0
   694  	var groupSmall groupReference
   695  	if m.dirLen <= 0 {
   696  		// Use dirIdx == -1 as sentinel for small maps.
   697  		dirIdx = -1
   698  		groupSmall.data = m.dirPtr
   699  	}
   700  
   701  	it.m = m
   702  	it.entryOffset = rand()
   703  	it.dirOffset = rand()
   704  	it.globalDepth = m.globalDepth
   705  	it.dirIdx = dirIdx
   706  	it.group = groupSmall
   707  	it.clearSeq = m.clearSeq
   708  }
   709  
// Initialized reports whether Init has been called on this iterator.
func (it *Iter) Initialized() bool {
	return it.typ != nil
}
   713  
// Map returns the map this iterator is iterating over.
func (it *Iter) Map() *Map {
	return it.m
}
   718  
// Key returns a pointer to the current key. nil indicates end of iteration.
//
// Must not be called prior to Next.
func (it *Iter) Key() unsafe.Pointer {
	return it.key
}
   725  
// Elem returns a pointer to the current element. nil indicates end of
// iteration.
//
// Must not be called prior to Next.
func (it *Iter) Elem() unsafe.Pointer {
	return it.elem
}
   733  
// nextDirIdx advances dirIdx past every directory entry that refers to the
// table we just finished, and resets the per-table iteration state.
func (it *Iter) nextDirIdx() {
	// Skip other entries in the directory that refer to the same
	// logical table. There are two cases of this:
	//
	// Consider this directory:
	//
	// - 0: *t1
	// - 1: *t1
	// - 2: *t2a
	// - 3: *t2b
	//
	// At some point, the directory grew to accommodate a split of
	// t2. t1 did not split, so entries 0 and 1 both point to t1.
	// t2 did split, so the two halves were installed in entries 2
	// and 3.
	//
	// If dirIdx is 0 and it.tab is t1, then we should skip past
	// entry 1 to avoid repeating t1.
	//
	// If dirIdx is 2 and it.tab is t2 (pre-split), then we should
	// skip past entry 3 because our pre-split t2 already covers
	// all keys from t2a and t2b (except for new insertions, which
	// iteration need not return).
	//
	// We can achieve both of these by using the difference between
	// the directory and table depth to compute how many entries
	// the table covers.
	entries := 1 << (it.m.globalDepth - it.tab.localDepth)
	it.dirIdx += entries
	it.tab = nil
	it.group = groupReference{}
	it.entryIdx = 0
}
   767  
// grownKeyElem returns the appropriate key/elem for key at slotIdx index
// within it.group, if any, after the map has grown since iteration started.
// It re-looks the key up in the new tables so mutations made after the grow
// are visible to iteration.
func (it *Iter) grownKeyElem(key unsafe.Pointer, slotIdx uintptr) (unsafe.Pointer, unsafe.Pointer, bool) {
	newKey, newElem, ok := it.m.getWithKey(it.typ, key)
	if !ok {
		// Key has likely been deleted, and
		// should be skipped.
		//
		// One exception is keys that don't
		// compare equal to themselves (e.g.,
		// NaN). These keys cannot be looked
		// up, so getWithKey will fail even if
		// the key exists.
		//
		// However, we are in luck because such
		// keys cannot be updated and they
		// cannot be deleted except with clear.
		// Thus if no clear has occurred, the
		// key/elem must still exist exactly as
		// in the old groups, so we can return
		// them from there.
		//
		// TODO(prattmic): Consider checking
		// clearSeq early. If a clear occurred,
		// Next could always return
		// immediately, as iteration doesn't
		// need to return anything added after
		// clear.
		if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
			elem := it.group.elem(it.typ, slotIdx)
			if it.typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}
			return key, elem, true
		}

		// This entry doesn't exist anymore.
		return nil, nil, false
	}

	return newKey, newElem, true
}
   810  
// Next proceeds to the next element in iteration, which can be accessed via
// the Key and Elem methods.
//
// The table can be mutated during iteration, though there is no guarantee that
// the mutations will be visible to the iteration.
//
// Init must be called prior to Next.
func (it *Iter) Next() {
	if it.m == nil {
		// Map was empty at Iter.Init.
		it.key = nil
		it.elem = nil
		return
	}

	if it.m.writing != 0 {
		fatal("concurrent map iteration and map write")
		return
	}

	if it.dirIdx < 0 {
		// Map was small at Init: a single group, no directory. Walk the
		// group's slots directly, starting at the randomized offset.
		for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
			k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots

			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
				// Empty or deleted.
				continue
			}

			key := it.group.key(it.typ, k)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// As below, if we have grown to a full map since Init,
			// we continue to use the old group to decide the keys
			// to return, but must look them up again in the new
			// tables.
			grown := it.m.dirLen > 0
			var elem unsafe.Pointer
			if grown {
				var ok bool
				newKey, newElem, ok := it.m.getWithKey(it.typ, key)
				if !ok {
					// See comment below.
					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
						elem = it.group.elem(it.typ, k)
						if it.typ.IndirectElem() {
							elem = *((*unsafe.Pointer)(elem))
						}
					} else {
						continue
					}
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, k)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}
		it.key = nil
		it.elem = nil
		return
	}

	if it.globalDepth != it.m.globalDepth {
		// Directory has grown since the last call to Next. Adjust our
		// directory index.
		//
		// Consider:
		//
		// Before:
		// - 0: *t1
		// - 1: *t2  <- dirIdx
		//
		// After:
		// - 0: *t1a (split)
		// - 1: *t1b (split)
		// - 2: *t2  <- dirIdx
		// - 3: *t2
		//
		// That is, we want to double the current index when the
		// directory size doubles (or quadruple when the directory size
		// quadruples, etc).
		//
		// The actual (randomized) dirIdx is computed below as:
		//
		// dirIdx := (it.dirIdx + it.dirOffset) % it.m.dirLen
		//
		// Multiplication is associative across modulo operations,
		// A * (B % C) = (A * B) % (A * C),
		// provided that A is positive.
		//
		// Thus we can achieve this by adjusting it.dirIdx,
		// it.dirOffset, and it.m.dirLen individually.
		orders := it.m.globalDepth - it.globalDepth
		it.dirIdx <<= orders
		it.dirOffset <<= orders
		// it.m.dirLen was already adjusted when the directory grew.

		it.globalDepth = it.m.globalDepth
	}

	// Continue iteration until we find a full slot.
	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() {
		// Resolve the table.
		if it.tab == nil {
			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
			newTab := it.m.directoryAt(uintptr(dirIdx))
			if newTab.index != dirIdx {
				// Normally we skip past all duplicates of the
				// same entry in the table (see updates to
				// it.dirIdx at the end of the loop below), so
				// this case wouldn't occur.
				//
				// But on the very first call, we have a
				// completely randomized dirIdx that may refer
				// to a middle of a run of tables in the
				// directory. Do a one-time adjustment of the
				// offset to ensure we start at first index for
				// newTable.
				diff := dirIdx - newTab.index
				it.dirOffset -= uint64(diff)
				dirIdx = newTab.index
			}
			it.tab = newTab
		}

		// N.B. Use it.tab, not newTab. It is important to use the old
		// table for key selection if the table has grown. See comment
		// on grown below.

		entryMask := uint64(it.tab.capacity) - 1
		if it.entryIdx > entryMask {
			// Continue to next table.
			continue
		}

		// Fast path: skip matching and directly check if entryIdx is a
		// full slot.
		//
		// In the slow path below, we perform an 8-slot match check to
		// look for full slots within the group.
		//
		// However, with a max load factor of 7/8, each slot in a
		// mostly full map has a high probability of being full. Thus
		// it is cheaper to check a single slot than do a full control
		// match.

		entryIdx := (it.entryIdx + it.entryOffset) & entryMask
		slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
		if slotIdx == 0 || it.group.data == nil {
			// Only compute the group (a) when we switch
			// groups (slotIdx rolls over) and (b) on the
			// first iteration in this table (slotIdx may
			// not be zero due to entryOffset).
			groupIdx := entryIdx >> abi.MapGroupSlotsBits
			it.group = it.tab.groups.group(it.typ, groupIdx)
		}

		if (it.group.ctrls().get(slotIdx) & ctrlEmpty) == 0 {
			// Slot full.

			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// index == -1 marks a table made stale by grow or
			// split (see rehash below); look the key up in the
			// live map instead of trusting this snapshot.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// This entry doesn't exist
					// anymore. Continue to the
					// next one.
					goto next
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}

	next:
		it.entryIdx++

		// Slow path: use a match on the control word to jump ahead to
		// the next full slot.
		//
		// This is highly effective for maps with particularly low load
		// (e.g., map allocated with large hint but few insertions).
		//
		// For maps with medium load (e.g., 3-4 empty slots per group)
		// it also tends to work pretty well. Since slots within a
		// group are filled in order, then if there have been no
		// deletions, a match will allow skipping past all empty slots
		// at once.
		//
		// Note: it is tempting to cache the group match result in the
		// iterator to use across Next calls. However because entries
		// may be deleted between calls later calls would still need to
		// double-check the control value.

		var groupMatch bitset
		for it.entryIdx <= entryMask {
			entryIdx := (it.entryIdx + it.entryOffset) & entryMask
			slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))

			if slotIdx == 0 || it.group.data == nil {
				// Only compute the group (a) when we switch
				// groups (slotIdx rolls over) and (b) on the
				// first iteration in this table (slotIdx may
				// not be zero due to entryOffset).
				groupIdx := entryIdx >> abi.MapGroupSlotsBits
				it.group = it.tab.groups.group(it.typ, groupIdx)
			}

			if groupMatch == 0 {
				groupMatch = it.group.ctrls().matchFull()

				if slotIdx != 0 {
					// Starting in the middle of the group.
					// Ignore earlier groups.
					groupMatch = groupMatch.removeBelow(slotIdx)
				}

				// Skip over groups that are composed of only empty or
				// deleted slots.
				if groupMatch == 0 {
					// Jump past remaining slots in this
					// group.
					it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
					continue
				}

				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
				if it.entryIdx > entryMask {
					// Past the end of this table's iteration.
					continue
				}
				entryIdx += uint64(i - slotIdx)
				slotIdx = i
			}

			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// If the table has changed since the last
			// call, then it has grown or split. In this
			// case, further mutations (changes to
			// key->elem or deletions) will not be visible
			// in our snapshot table. Instead we must
			// consult the new table by doing a full
			// lookup.
			//
			// We still use our old table to decide which
			// keys to lookup in order to avoid returning
			// the same key twice.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// This entry doesn't exist anymore.
					// Continue to the next one.
					groupMatch = groupMatch.removeFirst()
					if groupMatch == 0 {
						// No more entries in this
						// group. Continue to next
						// group.
						it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
						continue
					}

					// Next full slot.
					i := groupMatch.first()
					it.entryIdx += uint64(i - slotIdx)
					continue
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			// Jump ahead to the next full slot or next group.
			groupMatch = groupMatch.removeFirst()
			if groupMatch == 0 {
				// No more entries in
				// this group. Continue
				// to next group.
				it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
			} else {
				// Next full slot.
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
			}

			it.key = key
			it.elem = elem
			return
		}

		// Continue to next table.
	}

	// Iteration exhausted every table in the directory.
	it.key = nil
	it.elem = nil
	return
}
  1149  
  1150  // Replaces the table with one larger table or two split tables to fit more
  1151  // entries. Since the table is replaced, t is now stale and should not be
  1152  // modified.
  1153  func (t *table) rehash(typ *abi.MapType, m *Map) {
  1154  	// TODO(prattmic): SwissTables typically perform a "rehash in place"
  1155  	// operation which recovers capacity consumed by tombstones without growing
  1156  	// the table by reordering slots as necessary to maintain the probe
  1157  	// invariant while eliminating all tombstones.
  1158  	//
  1159  	// However, it is unclear how to make rehash in place work with
  1160  	// iteration. Since iteration simply walks through all slots in order
  1161  	// (with random start offset), reordering the slots would break
  1162  	// iteration.
  1163  	//
  1164  	// As an alternative, we could do a "resize" to new groups allocation
  1165  	// of the same size. This would eliminate the tombstones, but using a
  1166  	// new allocation, so the existing grow support in iteration would
  1167  	// continue to work.
  1168  
  1169  	newCapacity := 2 * t.capacity
  1170  	if newCapacity <= maxTableCapacity {
  1171  		t.grow(typ, m, newCapacity)
  1172  		return
  1173  	}
  1174  
  1175  	t.split(typ, m)
  1176  }
  1177  
  1178  // Bitmask for the last selection bit at this depth.
  1179  func localDepthMask(localDepth uint8) uintptr {
  1180  	if !Use64BitHash {
  1181  		return uintptr(1) << (32 - localDepth)
  1182  	}
  1183  	return uintptr(1) << (64 - localDepth)
  1184  }
  1185  
  1186  // split the table into two, installing the new tables in the map directory.
  1187  func (t *table) split(typ *abi.MapType, m *Map) {
  1188  	localDepth := t.localDepth
  1189  	localDepth++
  1190  
  1191  	// TODO: is this the best capacity?
  1192  	left := newTable(typ, maxTableCapacity, -1, localDepth)
  1193  	right := newTable(typ, maxTableCapacity, -1, localDepth)
  1194  
  1195  	// Split in half at the localDepth bit from the top.
  1196  	mask := localDepthMask(localDepth)
  1197  
  1198  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
  1199  		g := t.groups.group(typ, i)
  1200  		for j := uintptr(0); j < abi.MapGroupSlots; j++ {
  1201  			if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
  1202  				// Empty or deleted
  1203  				continue
  1204  			}
  1205  
  1206  			key := g.key(typ, j)
  1207  			if typ.IndirectKey() {
  1208  				key = *((*unsafe.Pointer)(key))
  1209  			}
  1210  
  1211  			elem := g.elem(typ, j)
  1212  			if typ.IndirectElem() {
  1213  				elem = *((*unsafe.Pointer)(elem))
  1214  			}
  1215  
  1216  			hash := typ.Hasher(key, m.seed)
  1217  			var newTable *table
  1218  			if hash&mask == 0 {
  1219  				newTable = left
  1220  			} else {
  1221  				newTable = right
  1222  			}
  1223  			newTable.uncheckedPutSlot(typ, hash, key, elem)
  1224  		}
  1225  	}
  1226  
  1227  	m.installTableSplit(t, left, right)
  1228  	t.index = -1
  1229  }
  1230  
  1231  // grow the capacity of the table by allocating a new table with a bigger array
  1232  // and uncheckedPutting each element of the table into the new table (we know
  1233  // that no insertion here will Put an already-present value), and discard the
  1234  // old table.
  1235  func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
  1236  	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)
  1237  
  1238  	if t.capacity > 0 {
  1239  		for i := uint64(0); i <= t.groups.lengthMask; i++ {
  1240  			g := t.groups.group(typ, i)
  1241  			for j := uintptr(0); j < abi.MapGroupSlots; j++ {
  1242  				if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
  1243  					// Empty or deleted
  1244  					continue
  1245  				}
  1246  
  1247  				key := g.key(typ, j)
  1248  				if typ.IndirectKey() {
  1249  					key = *((*unsafe.Pointer)(key))
  1250  				}
  1251  
  1252  				elem := g.elem(typ, j)
  1253  				if typ.IndirectElem() {
  1254  					elem = *((*unsafe.Pointer)(elem))
  1255  				}
  1256  
  1257  				hash := typ.Hasher(key, m.seed)
  1258  
  1259  				newTable.uncheckedPutSlot(typ, hash, key, elem)
  1260  			}
  1261  		}
  1262  	}
  1263  
  1264  	newTable.checkInvariants(typ, m)
  1265  	m.replaceTable(newTable)
  1266  	t.index = -1
  1267  }
  1268  
// probeSeq maintains the state for a probe sequence that iterates through the
// groups in a table. The sequence is a triangular progression of the form
// hash, hash + 1, hash + 1 + 2, hash + 1 + 2 + 3, ..., modulo mask + 1.
// The i-th term of the sequence is
//
//	p(i) := hash + (i^2 + i)/2 (mod mask+1)
//
// The sequence effectively outputs the indexes of *groups*. The group
// machinery allows us to check an entire group with minimal branching.
//
// It turns out that this probe sequence visits every group exactly once if
// the number of groups is a power of two, since (i^2+i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
type probeSeq struct {
	mask   uint64 // number of groups minus one; group count must be a power of two
	offset uint64 // current group index, i.e. p(index)
	index  uint64 // current term i of the triangular progression
}
  1287  
  1288  func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
  1289  	return probeSeq{
  1290  		mask:   mask,
  1291  		offset: uint64(hash) & mask,
  1292  		index:  0,
  1293  	}
  1294  }
  1295  
  1296  func (s probeSeq) next() probeSeq {
  1297  	s.index++
  1298  	s.offset = (s.offset + s.index) & s.mask
  1299  	return s
  1300  }
  1301  
  1302  func (t *table) clone(typ *abi.MapType) *table {
  1303  	// Shallow copy the table structure.
  1304  	t2 := new(table)
  1305  	*t2 = *t
  1306  	t = t2
  1307  
  1308  	// We need to just deep copy the groups.data field.
  1309  	oldGroups := t.groups
  1310  	newGroups := newGroups(typ, oldGroups.lengthMask+1)
  1311  	for i := uint64(0); i <= oldGroups.lengthMask; i++ {
  1312  		oldGroup := oldGroups.group(typ, i)
  1313  		newGroup := newGroups.group(typ, i)
  1314  		cloneGroup(typ, newGroup, oldGroup)
  1315  	}
  1316  	t.groups = newGroups
  1317  
  1318  	return t
  1319  }
  1320  

View as plain text