// Source file: src/internal/runtime/maps/runtime_fast64.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goexperiment"
    10  	"internal/race"
    11  	"internal/runtime/sys"
    12  	"unsafe"
    13  )
    14  
    15  //go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
    16  func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
    17  	p, _ := runtime_mapaccess2_fast64(typ, m, key)
    18  	return p
    19  }
    20  
// runtime_mapaccess2_fast64 is the uint64-key fast path for the
// two-result map access form (v, ok := m[k]). On a miss it returns a
// pointer into the shared zeroVal buffer and false; callers must treat
// that pointer as read-only.
//
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
	// Report the read to the race detector before any early return so
	// read/write races are caught even on nil or empty maps.
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Lookups on nil or empty maps always miss.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	// Small map (no directory): a single group scanned linearly,
	// without computing the hash at all.
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		var keyStride uintptr
		if goexperiment.MapSplitGroup {
			keyStride = 8 // keys are contiguous in split layout
		} else {
			keyStride = typ.KeyStride // == SlotSize in interleaved layout
		}
		var i uintptr
		for full != 0 {
			// Stale bytes in a non-full slot can compare equal to key,
			// so the comparison only counts when the mask confirms the
			// current slot is actually full.
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				if goexperiment.MapSplitGroup {
					return g.elem(typ, i), true
				} else {
					// Interleaved layout: the element immediately
					// follows the 8-byte key within the slot.
					return unsafe.Pointer(uintptr(slotKey) + 8), true
				}
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
			full = full.shiftOutLowest()
			i++
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := memhash64(unsafe.Pointer(&k), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: control bytes matching this hash's h2.
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				if goexperiment.MapSplitGroup {
					return g.elem(typ, i), true
				} else {
					return unsafe.Pointer(uintptr(slotKey) + 8), true
				}
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   106  
// putSlotSmallFast64 finds or creates the slot for key in a small
// (single-group, directory-less) map and returns a pointer to its
// element storage. The caller passes the precomputed hash and must
// guarantee there is room (key already present, or a free slot exists)
// and must handle the writing flag itself.
func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
	// In small form the whole map is the one group rooted at dirPtr.
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint64)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	// Claim the first free slot: store the key, then publish the
	// control byte and count the new entry.
	slotKey := g.key(typ, i)
	*(*uint64)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   146  
// runtime_mapassign_fast64 is the uint64-key fast path for map assign
// (m[k] = v). It returns a pointer to the element slot into which the
// caller stores the value, inserting key first if it is not present.
// Panics on assignment to a nil map; fatals on detected concurrent
// writes.
//
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := memhash64(unsafe.Pointer(&k), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// First write to a fresh map: allocate the small single-group form.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	// Small map (no directory): insert directly into the lone group,
	// growing to a full table only once the group is at capacity.
	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFast64(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint64)(slotKey) {
					// Key already present: reuse its element slot.
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint64)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// Still no room: rehash/split this table, then retry the
			// probe from the (possibly updated) directory entry.
			t.rehash(typ, m)
			continue outer
		}
	}

	// The writing flag must still be set; if another writer cleared it
	// we raced.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   284  
// putSlotSmallFastPtr finds or creates the slot for a pointer key in a
// small (single-group, directory-less) map and returns a pointer to its
// element storage. The caller passes the precomputed hash and must
// guarantee there is room (key already present, or a free slot exists)
// and must handle the writing flag itself.
func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	// In small form the whole map is the one group rooted at dirPtr.
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*unsafe.Pointer)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	// Claim the first free slot: store the key, then publish the
	// control byte and count the new entry.
	slotKey := g.key(typ, i)
	*(*unsafe.Pointer)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   324  
   325  // Key is a 64-bit pointer (only called on 64-bit GOARCH).
   326  //
   327  //go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
   328  func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
   329  	if m == nil {
   330  		panic(errNilAssign)
   331  	}
   332  	if race.Enabled {
   333  		callerpc := sys.GetCallerPC()
   334  		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64ptr)
   335  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   336  	}
   337  	if m.writing != 0 {
   338  		fatal("concurrent map writes")
   339  	}
   340  
   341  	// See the related comment in runtime_mapaccess2_fast32
   342  	// for why we pass local copy of key.
   343  	k := key
   344  	hash := memhash64(unsafe.Pointer(&k), m.seed)
   345  
   346  	// Set writing after calling Hasher, since Hasher may panic, in which
   347  	// case we have not actually done a write.
   348  	m.writing ^= 1 // toggle, see comment on writing
   349  
   350  	if m.dirPtr == nil {
   351  		m.growToSmall(typ)
   352  	}
   353  
   354  	if m.dirLen == 0 {
   355  		if m.used < abi.MapGroupSlots {
   356  			elem := m.putSlotSmallFastPtr(typ, hash, key)
   357  
   358  			if m.writing == 0 {
   359  				fatal("concurrent map writes")
   360  			}
   361  			m.writing ^= 1
   362  
   363  			return elem
   364  		}
   365  
   366  		// Can't fit another entry, grow to full size map.
   367  		m.growToTable(typ)
   368  	}
   369  
   370  	var slotElem unsafe.Pointer
   371  outer:
   372  	for {
   373  		// Select table.
   374  		idx := m.directoryIndex(hash)
   375  		t := m.directoryAt(idx)
   376  
   377  		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   378  
   379  		// As we look for a match, keep track of the first deleted slot
   380  		// we find, which we'll use to insert the new entry if
   381  		// necessary.
   382  		var firstDeletedGroup groupReference
   383  		var firstDeletedSlot uintptr
   384  
   385  		h2Hash := h2(hash)
   386  		for ; ; seq = seq.next() {
   387  			g := t.groups.group(typ, seq.offset)
   388  			match := g.ctrls().matchH2(h2Hash)
   389  
   390  			// Look for an existing slot containing this key.
   391  			for match != 0 {
   392  				i := match.first()
   393  
   394  				slotKey := g.key(typ, i)
   395  				if key == *(*unsafe.Pointer)(slotKey) {
   396  					slotElem = g.elem(typ, i)
   397  
   398  					t.checkInvariants(typ, m)
   399  					break outer
   400  				}
   401  				match = match.removeFirst()
   402  			}
   403  
   404  			// No existing slot for this key in this group. Is this the end
   405  			// of the probe sequence?
   406  			match = g.ctrls().matchEmptyOrDeleted()
   407  			if match == 0 {
   408  				continue // nothing but filled slots. Keep probing.
   409  			}
   410  			i := match.first()
   411  			if g.ctrls().get(i) == ctrlDeleted {
   412  				// There are some deleted slots. Remember
   413  				// the first one, and keep probing.
   414  				if firstDeletedGroup.data == nil {
   415  					firstDeletedGroup = g
   416  					firstDeletedSlot = i
   417  				}
   418  				continue
   419  			}
   420  			// We've found an empty slot, which means we've reached the end of
   421  			// the probe sequence.
   422  
   423  			// If we found a deleted slot along the way, we can
   424  			// replace it without consuming growthLeft.
   425  			if firstDeletedGroup.data != nil {
   426  				g = firstDeletedGroup
   427  				i = firstDeletedSlot
   428  				t.growthLeft++ // will be decremented below to become a no-op.
   429  			}
   430  
   431  			// If there is room left to grow, just insert the new entry.
   432  			if t.growthLeft > 0 {
   433  				slotKey := g.key(typ, i)
   434  				*(*unsafe.Pointer)(slotKey) = key
   435  
   436  				slotElem = g.elem(typ, i)
   437  
   438  				g.ctrls().set(i, ctrl(h2Hash))
   439  				t.growthLeft--
   440  				t.used++
   441  				m.used++
   442  
   443  				t.checkInvariants(typ, m)
   444  				break outer
   445  			}
   446  
   447  			t.rehash(typ, m)
   448  			continue outer
   449  		}
   450  	}
   451  
   452  	if m.writing == 0 {
   453  		fatal("concurrent map writes")
   454  	}
   455  	m.writing ^= 1
   456  
   457  	return slotElem
   458  }
   459  
   460  //go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
   461  func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
   462  	if race.Enabled {
   463  		callerpc := sys.GetCallerPC()
   464  		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)
   465  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   466  	}
   467  
   468  	if m == nil || m.Used() == 0 {
   469  		return
   470  	}
   471  
   472  	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
   473  }
   474  

View as plain text