Source file src/internal/runtime/maps/runtime_faststr.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"internal/goexperiment"
    11  	"internal/race"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
// getWithoutKeySmallFastStr returns a pointer to the element corresponding to
// key in a small map (one with no directory, where every slot lives in the
// single group at m.dirPtr), or nil if key is not present.
//
// For long keys (>64 bytes) it first tries to disprove a match with a cheap
// prefix/suffix comparison of each full slot, which can avoid hashing the
// lookup key entirely.
func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	// Load all 8 control bytes at once; they are consumed one byte at a
	// time below by shifting.
	ctrls := *g.ctrls()
	slotKey := g.key(typ, 0)
	var keyStride uintptr
	if goexperiment.MapSplitGroup {
		keyStride = 2 * goarch.PtrSize // keys are contiguous in split layout
	} else {
		keyStride = typ.KeyStride // == SlotSize in interleaved layout
	}

	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight,
	// where there are 8 keys to check, all of which don't quick-match the lookup key.
	// In that case, we can save hashing the lookup key. That savings is worth this extra code
	// for strings that are long enough that hashing is expensive.
	if len(key) > 64 {
		// String hashing and equality might be expensive. Do a quick check first.
		j := abi.MapGroupSlots // sentinel: no candidate slot found yet
		for i := range abi.MapGroupSlots {
			// A clear high bit in the control byte means the slot is
			// full; only then is slotKey safe to read as a string.
			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
				if j < abi.MapGroupSlots {
					// 2 strings both passed the quick equality test.
					// Break out of this loop and do it the slow way.
					goto dohash
				}
				j = i
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
			ctrls >>= 8
		}
		if j == abi.MapGroupSlots {
			// No slot passed the quick test.
			return nil
		}
		// There's exactly one slot that passed the quick test. Do the single expensive comparison.
		slotKey = g.key(typ, uintptr(j))
		if key == *(*string)(slotKey) {
			if goexperiment.MapSplitGroup {
				return g.elem(typ, uintptr(j))
			} else {
				// Interleaved layout: the element sits immediately
				// after the two-word string key in the same slot.
				return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
			}
		}
		return nil
	}

	// Short keys fall through here; long keys arrive via the goto above when
	// two slots passed the quick test.
dohash:
	// This path will cost 1 hash and 1+ε comparisons.

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := strhash(unsafe.Pointer(&k), m.seed)
	h2 := uint8(h2(hash))
	ctrls = *g.ctrls()
	slotKey = g.key(typ, 0)

	for i := range uintptr(abi.MapGroupSlots) {
		// The low control byte matching h2 marks a probable hit; confirm
		// with a full string comparison.
		if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
			if goexperiment.MapSplitGroup {
				return g.elem(typ, i)
			} else {
				return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
			}
		}
		slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
		ctrls >>= 8
	}
	return nil
}
    89  
    90  // Returns true if a and b might be equal.
    91  // Returns false if a and b are definitely not equal.
    92  // Requires len(a)>=8.
    93  func longStringQuickEqualityTest(a, b string) bool {
    94  	if len(a) != len(b) {
    95  		return false
    96  	}
    97  	x, y := stringPtr(a), stringPtr(b)
    98  	// Check first 8 bytes.
    99  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
   100  		return false
   101  	}
   102  	// Check last 8 bytes.
   103  	x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8)
   104  	y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8)
   105  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
   106  		return false
   107  	}
   108  	return true
   109  }
   110  func stringPtr(s string) unsafe.Pointer {
   111  	type stringStruct struct {
   112  		ptr unsafe.Pointer
   113  		len int
   114  	}
   115  	return (*stringStruct)(unsafe.Pointer(&s)).ptr
   116  }
   117  
   118  //go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
   119  func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
   120  	p, _ := runtime_mapaccess2_faststr(typ, m, key)
   121  	return p
   122  }
   123  
// runtime_mapaccess2_faststr is the string-key fast path for two-result map
// access (v, ok := m[k]). It returns a pointer to the element and true when
// key is present, or a pointer to the shared zero value and false when not.
//
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map always miss.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	// Small maps have no directory; search the single group directly.
	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := strhash(unsafe.Pointer(&k), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Bitset of slots in this group whose control byte matches h2;
		// each candidate is confirmed with a full string comparison.
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				if goexperiment.MapSplitGroup {
					return g.elem(typ, i), true
				} else {
					// Interleaved layout: element follows the
					// two-word string key in the same slot.
					return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize), true
				}
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   188  
// putSlotSmallFastStr installs key into the single group of a small
// (directory-less) map and returns a pointer to its element slot, either an
// existing slot for an equal key or a freshly claimed empty slot.
//
// The caller has already set the writing flag and verified there is room
// (m.used < abi.MapGroupSlots); this function does not grow the map.
func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr, // the lone group of a small map
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	// Mark the slot full (with h2 of the key's hash) only after the key has
	// been written.
	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   230  
// runtime_mapassign_faststr is the string-key fast path for map assignment
// (m[k] = v). It returns a pointer to the element slot for key, inserting a
// new entry (and growing the map) if needed; the caller stores the value.
//
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_faststr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	// See the related comment in runtime_mapaccess2_fast32
	// for why we pass local copy of key.
	k := key
	hash := strhash(unsafe.Pointer(&k), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// A brand-new map starts out in the small (single-group) form.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1 // clear the write flag

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	// Each iteration re-reads the directory; a rehash below restarts here.
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				// Publish the slot (control byte last, after the
				// key is written), then update the counters.
				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// Still no room: rehash this table and retry the whole
			// lookup/insert against the new layout.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1 // clear the write flag

	return slotElem
}
   371  
   372  //go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
   373  func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
   374  	if race.Enabled {
   375  		callerpc := sys.GetCallerPC()
   376  		pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
   377  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   378  	}
   379  
   380  	if m == nil || m.Used() == 0 {
   381  		return
   382  	}
   383  
   384  	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
   385  }
   386  

View as plain text