Source file src/internal/runtime/maps/runtime_fast32.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goexperiment"
    10  	"internal/race"
    11  	"internal/runtime/sys"
    12  	"unsafe"
    13  )
    14  
    15  //go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
    16  func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
    17  	p, _ := runtime_mapaccess2_fast32(typ, m, key)
    18  	return p
    19  }
    20  
// runtime_mapaccess2_fast32 is the comma-ok access specialization for maps
// with uint32 keys. It returns a pointer to the element for key and whether
// the key was present. On a miss it returns a pointer to the shared zero
// value, never nil.
//
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map simply miss.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false // unreachable; fatal does not return
	}

	if m.dirLen == 0 {
		// Small-map fast path: the whole map is a single group with no
		// directory, so scan the group's full slots linearly without
		// hashing the key at all.
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		var keyStride uintptr
		if goexperiment.MapSplitGroup {
			keyStride = 4 // keys are contiguous in split layout
		} else {
			keyStride = typ.KeyStride // == SlotSize in interleaved layout
		}
		var i uintptr
		for full != 0 {
			// Compare the key first (reading a non-full slot's key is
			// harmless); full.lowestSet() then confirms slot i is
			// actually occupied before reporting a match.
			if key == *(*uint32)(slotKey) && full.lowestSet() {
				if goexperiment.MapSplitGroup {
					return g.elem(typ, i), true
				} else {
					return unsafe.Pointer(uintptr(slotKey) + typ.ElemOff), true
				}
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
			full = full.shiftOutLowest()
			i++
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	// Don't pass address of the key directly to the hashing function.
	// Hashing functions are implemented in Go assembly and cannot be inlined,
	// so compiler doesn't optimize redundant address taking/dereference.
	//
	// Taking &key makes compiler treat key as address-taken, which forces it to spill on the stack
	// and reload it in the loop.
	// This is suboptimal for performance.
	//
	// Note: Even when we pass k (local copy of key), the compiler still spills the key to the stack.
	// However, from compiler's perspective, key is no longer address-taken and
	// filled back in register before the loop.
	k := key
	hash := memhash32(unsafe.Pointer(&k), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table: walk the probe sequence of groups derived from h1(hash),
	// using the h2 control bytes to find candidate slots in each group.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			// h2 matches can be false positives; confirm with the
			// full key comparison.
			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				if goexperiment.MapSplitGroup {
					return g.elem(typ, i), true
				} else {
					return unsafe.Pointer(uintptr(slotKey) + typ.ElemOff), true
				}
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   114  
   115  func (m *Map) putSlotSmallFast32(typ *abi.MapType, hash uintptr, key uint32) unsafe.Pointer {
   116  	g := groupReference{
   117  		data: m.dirPtr,
   118  	}
   119  
   120  	match := g.ctrls().matchH2(h2(hash))
   121  
   122  	// Look for an existing slot containing this key.
   123  	for match != 0 {
   124  		i := match.first()
   125  
   126  		slotKey := g.key(typ, i)
   127  		if key == *(*uint32)(slotKey) {
   128  			slotElem := g.elem(typ, i)
   129  			return slotElem
   130  		}
   131  		match = match.removeFirst()
   132  	}
   133  
   134  	// There can't be deleted slots, small maps can't have them
   135  	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
   136  	// more efficient than matchEmpty.
   137  	match = g.ctrls().matchEmptyOrDeleted()
   138  	if match == 0 {
   139  		fatal("small map with no empty slot (concurrent map writes?)")
   140  	}
   141  
   142  	i := match.first()
   143  
   144  	slotKey := g.key(typ, i)
   145  	*(*uint32)(slotKey) = key
   146  
   147  	slotElem := g.elem(typ, i)
   148  
   149  	g.ctrls().set(i, ctrl(h2(hash)))
   150  	m.used++
   151  
   152  	return slotElem
   153  }
   154  
// runtime_mapassign_fast32 is the assignment specialization for maps with
// uint32 keys. It returns a pointer to the element slot for key, inserting
// the key first if it is not already present; the caller stores the element
// value through the returned pointer. Panics with errNilAssign on a nil map.
//
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
func runtime_mapassign_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := memhash32(unsafe.Pointer(&k), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		// First insert into an empty map: allocate the single-group
		// small map representation.
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		// Small map (single group, no directory).
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFast32(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			// h2 matches may be false positives, so confirm with a
			// full key comparison.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint32)(slotKey) {
					// Key already present; reuse its element slot.
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint32)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// Still no room: grow/rehash this table and retry the
			// whole probe from the directory.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   292  
   293  // Key is a 32-bit pointer (only called on 32-bit GOARCH). This source is identical to fast64ptr.
   294  //
   295  // TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
   296  //
   297  //go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
   298  func runtime_mapassign_fast32ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
   299  	if m == nil {
   300  		panic(errNilAssign)
   301  	}
   302  	if race.Enabled {
   303  		callerpc := sys.GetCallerPC()
   304  		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32ptr)
   305  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   306  	}
   307  	if m.writing != 0 {
   308  		fatal("concurrent map writes")
   309  	}
   310  
   311  	// See the related comment in runtime_mapaccess2_fast32
   312  	// for why we pass local copy of key.
   313  	k := key
   314  	hash := memhash32(unsafe.Pointer(&k), m.seed)
   315  
   316  	// Set writing after calling Hasher, since Hasher may panic, in which
   317  	// case we have not actually done a write.
   318  	m.writing ^= 1 // toggle, see comment on writing
   319  
   320  	if m.dirPtr == nil {
   321  		m.growToSmall(typ)
   322  	}
   323  
   324  	if m.dirLen == 0 {
   325  		if m.used < abi.MapGroupSlots {
   326  			elem := m.putSlotSmallFastPtr(typ, hash, key)
   327  
   328  			if m.writing == 0 {
   329  				fatal("concurrent map writes")
   330  			}
   331  			m.writing ^= 1
   332  
   333  			return elem
   334  		}
   335  
   336  		// Can't fit another entry, grow to full size map.
   337  		m.growToTable(typ)
   338  	}
   339  
   340  	var slotElem unsafe.Pointer
   341  outer:
   342  	for {
   343  		// Select table.
   344  		idx := m.directoryIndex(hash)
   345  		t := m.directoryAt(idx)
   346  
   347  		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   348  
   349  		// As we look for a match, keep track of the first deleted slot we
   350  		// find, which we'll use to insert the new entry if necessary.
   351  		var firstDeletedGroup groupReference
   352  		var firstDeletedSlot uintptr
   353  
   354  		h2Hash := h2(hash)
   355  		for ; ; seq = seq.next() {
   356  			g := t.groups.group(typ, seq.offset)
   357  			match := g.ctrls().matchH2(h2Hash)
   358  
   359  			// Look for an existing slot containing this key.
   360  			for match != 0 {
   361  				i := match.first()
   362  
   363  				slotKey := g.key(typ, i)
   364  				if key == *(*unsafe.Pointer)(slotKey) {
   365  					slotElem = g.elem(typ, i)
   366  
   367  					t.checkInvariants(typ, m)
   368  					break outer
   369  				}
   370  				match = match.removeFirst()
   371  			}
   372  
   373  			// No existing slot for this key in this group. Is this the end
   374  			// of the probe sequence?
   375  			match = g.ctrls().matchEmptyOrDeleted()
   376  			if match == 0 {
   377  				continue // nothing but filled slots. Keep probing.
   378  			}
   379  			i := match.first()
   380  			if g.ctrls().get(i) == ctrlDeleted {
   381  				// There are some deleted slots. Remember
   382  				// the first one, and keep probing.
   383  				if firstDeletedGroup.data == nil {
   384  					firstDeletedGroup = g
   385  					firstDeletedSlot = i
   386  				}
   387  				continue
   388  			}
   389  			// We've found an empty slot, which means we've reached the end of
   390  			// the probe sequence.
   391  
   392  			// If we found a deleted slot along the way, we can
   393  			// replace it without consuming growthLeft.
   394  			if firstDeletedGroup.data != nil {
   395  				g = firstDeletedGroup
   396  				i = firstDeletedSlot
   397  				t.growthLeft++ // will be decremented below to become a no-op.
   398  			}
   399  
   400  			// If there is room left to grow, just insert the new entry.
   401  			if t.growthLeft > 0 {
   402  				slotKey := g.key(typ, i)
   403  				*(*unsafe.Pointer)(slotKey) = key
   404  
   405  				slotElem = g.elem(typ, i)
   406  
   407  				g.ctrls().set(i, ctrl(h2Hash))
   408  				t.growthLeft--
   409  				t.used++
   410  				m.used++
   411  
   412  				t.checkInvariants(typ, m)
   413  				break outer
   414  			}
   415  
   416  			t.rehash(typ, m)
   417  			continue outer
   418  		}
   419  	}
   420  
   421  	if m.writing == 0 {
   422  		fatal("concurrent map writes")
   423  	}
   424  	m.writing ^= 1
   425  
   426  	return slotElem
   427  }
   428  
   429  //go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
   430  func runtime_mapdelete_fast32(typ *abi.MapType, m *Map, key uint32) {
   431  	if race.Enabled {
   432  		callerpc := sys.GetCallerPC()
   433  		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)
   434  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   435  	}
   436  
   437  	if m == nil || m.Used() == 0 {
   438  		return
   439  	}
   440  
   441  	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
   442  }
   443  

View as plain text