5 package maps
6
7 import (
8 "internal/abi"
9 "internal/asan"
10 "internal/goexperiment"
11 "internal/msan"
12 "internal/race"
13 "internal/runtime/sys"
14 "unsafe"
15 )
16
17
18
19
20
// The following functions are declared without bodies: they are provided
// by the runtime package (via go:linkname or assembly; the pragmas are not
// visible in this view).

// memhash32 hashes the 4 bytes at p, mixing in seed h.
func memhash32(p unsafe.Pointer, h uintptr) uintptr

// memhash64 hashes the 8 bytes at p, mixing in seed h.
func memhash64(p unsafe.Pointer, h uintptr) uintptr

// strhash hashes the string value at a, mixing in seed h.
func strhash(a unsafe.Pointer, h uintptr) uintptr

// fatal aborts the program with message s; used below for unrecoverable
// conditions such as concurrent map mutation.
func fatal(s string)

// rand returns a random uint64 from the runtime's generator.
func rand() uint64

// typedmemmove copies a value of type typ from src to dst.
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer)

// typedmemclr clears (zeroes) the value of type typ at ptr.
func typedmemclr(typ *abi.Type, ptr unsafe.Pointer)

// newarray allocates a zeroed array of n values of type typ.
func newarray(typ *abi.Type, n int) unsafe.Pointer

// newobject allocates a single zeroed value of type typ.
func newobject(typ *abi.Type) unsafe.Pointer
48
49
50
51
// errNilAssign is the value panicked with when assigning into a nil map
// (see runtime_mapassign). NOTE(review): it is initialized elsewhere, not
// visible in this chunk.
var errNilAssign error

// zeroVal is shared backing storage whose address is returned for lookups
// of keys that are not present; abi.ZeroValSize bounds the element sizes
// for which the compiler routes misses here.
var zeroVal [abi.ZeroValSize]byte
58
59
60
61
62
63
64
65
66 func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
67 p, _ := runtime_mapaccess2(typ, m, key)
68 return p
69 }
70
71
// runtime_mapaccess2 implements the two-result map read, v, ok = m[key].
// It returns a pointer to the element and true if key is present, or a
// pointer to the shared zero value and false if not. The returned pointer
// is never nil.
func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	// Sanitizer instrumentation: report the read of the map header and of
	// the key object so the race/msan/asan runtimes can check this access.
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	// Reading a nil or empty map yields the zero value — but an unhashable
	// key must still panic, so the key is validated even on this fast path.
	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	// A read concurrent with a write is a fatal error, not a recoverable
	// panic.
	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	// Small maps have no directory (dirLen == 0); use the specialized
	// single-group lookup.
	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Select the table whose directory entry covers this hash.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe the table's groups starting from the slot implied by h1(hash),
	// matching candidate slots by the h2(hash) control byte.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		// Check every slot in this group whose control byte matched h2.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			slotKeyOrig := slotKey
			if typ.IndirectKey() {
				// Slot holds a pointer to the key, not the key itself.
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				var slotElem unsafe.Pointer
				if goexperiment.MapSplitGroup {
					slotElem = g.elem(typ, i)
				} else {
					// Elem lives at a fixed offset from the (original,
					// possibly indirect) key slot.
					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				}
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		// An empty (never-used) slot in the group means the probe sequence
		// ends here: the key is not in the map.
		match = g.ctrls().matchEmpty()
		if match != 0 {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
150
151
// runtime_mapassign implements m[key] = elem: it returns a pointer to the
// element slot for key, inserting key first if it is not already present.
// The caller stores the element through the returned pointer.
func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	// Assigning into a nil map panics (unlike reads, which return zero).
	if m == nil {
		panic(errNilAssign)
	}
	// Sanitizer instrumentation: the map is written, the key is read.
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled {
		asan.Read(key, typ.Key.Size_)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set the write flag only after hashing — presumably because Hasher may
	// panic (unhashable key), in which case no write has happened.
	m.writing ^= 1 // toggle: checked-and-cleared again on every exit path

	// Lazily allocate the initial (small) map storage.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			// Small map with room: insert directly into the single group.
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// The single group is full; promote to a full table layout and
		// fall through to the general insert below.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select the table whose directory entry covers this hash. Restarted
		// (via "continue outer") after a rehash changes the layout.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// While probing, remember the first deleted (tombstone) slot seen so
		// it can be reused if the key turns out not to be present.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// First, look for an existing slot holding this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				slotKeyOrig := slotKey
				if typ.IndirectKey() {
					// Slot holds a pointer to the key, not the key itself.
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				if typ.Key.Equal(key, slotKey) {
					// Key already present: optionally refresh the stored key
					// (e.g. keys that compare equal but are distinct values).
					if typ.NeedKeyUpdate() {
						typedmemmove(typ.Key, slotKey, key)
					}

					if goexperiment.MapSplitGroup {
						slotElem = g.elem(typ, i)
					} else {
						// Elem lives at a fixed offset from the (original,
						// possibly indirect) key slot.
						slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					}
					if typ.IndirectElem() {
						slotElem = *((*unsafe.Pointer)(slotElem))
					}

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No match in this group. An empty slot means the key is not in
			// the table at all, so we can insert.
			match = g.ctrls().matchEmpty()
			if match != 0 {
				var i uintptr

				// Prefer reusing an earlier tombstone over consuming a fresh
				// empty slot; reuse does not reduce growth capacity, hence
				// the increment (cancelled by the decrement below).
				if firstDeletedGroup.data != nil {
					g = firstDeletedGroup
					i = firstDeletedSlot
					t.growthLeft++
				} else {
					// Otherwise take the first empty slot in this group.
					i = match.first()
				}

				// If there is growth capacity left, insert here.
				if t.growthLeft > 0 {
					slotKey := g.key(typ, i)
					slotKeyOrig := slotKey
					if typ.IndirectKey() {
						// Allocate out-of-line storage for the key and park
						// its pointer in the slot.
						kmem := newobject(typ.Key)
						*(*unsafe.Pointer)(slotKey) = kmem
						slotKey = kmem
					}
					typedmemmove(typ.Key, slotKey, key)

					if goexperiment.MapSplitGroup {
						slotElem = g.elem(typ, i)
					} else {
						slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					}
					if typ.IndirectElem() {
						// Same indirection scheme for the element.
						emem := newobject(typ.Elem)
						*(*unsafe.Pointer)(slotElem) = emem
						slotElem = emem
					}

					// Publish the slot via its control byte, then bump the
					// counters.
					g.ctrls().set(i, ctrl(h2Hash))
					t.growthLeft--
					t.used++
					m.used++

					t.checkInvariants(typ, m)
					break outer
				}

				// Out of capacity: rehash (grow/split) this table, then retry
				// the whole insert against the new layout.
				t.rehash(typ, m)
				continue outer
			}

			// Group is full (no empty slots). Before probing onward, record
			// the first empty-or-deleted slot seen — having already ruled out
			// empties above, any match here is a deleted slot.
			if firstDeletedGroup.data == nil {
				match = g.ctrls().matchEmptyOrDeleted()
				if match != 0 {
					firstDeletedGroup = g
					firstDeletedSlot = match.first()
				}
			}
		}
	}

	// Re-check and clear the write flag on the way out.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
326