5 package maps
6
7 import (
8 "internal/abi"
9 "internal/goexperiment"
10 "internal/runtime/math"
11 "unsafe"
12 )
13
14
15
16
17
18
19
// maxTableCapacity is the maximum number of slots in a single table.
// Larger maps are spread across multiple tables (see split/rehash), so no
// individual table ever exceeds this size.
const maxTableCapacity = 1024
21
22
23
// Compile-time check that maxTableCapacity fits in a uint16, since
// table.capacity (and the fields derived from it) are stored as uint16.
var _ = uint16(maxTableCapacity)
25
26
27
28
29
30
31
32
33
// table is a single open-addressed hash table: slots are organized into
// groups, each group carrying per-slot control bytes that are matched
// against the low bits of the hash (h2) before full key comparison.
// A Map is a directory of one or more tables (extendible hashing); each
// table covers a localDepth-bit prefix of the hash space.
type table struct {
	// used is the number of slots currently holding a key/elem pair
	// (incremented by inserts, decremented by Delete).
	used uint16

	// capacity is the total number of slots in the table. Always a
	// power of two (rounded up in newTable) and at most
	// maxTableCapacity.
	capacity uint16

	// growthLeft is the number of insertions that may still be
	// performed before the table must be rehashed (grown or split).
	// Sized from the load factor by maxGrowthLeft; reusing a deleted
	// slot does not consume it.
	growthLeft uint16

	// localDepth is this table's extendible-hashing depth: the number
	// of high hash bits used by the directory to route keys here. A
	// table occupies 1 << (globalDepth - localDepth) consecutive
	// directory entries.
	localDepth uint8

	// index is the position of (the first entry for) this table in the
	// Map directory. Set to -1 when the table has been replaced by
	// grow/split and is no longer in the directory; iterators use this
	// to detect that they are walking a stale table.
	index int

	// groups is the backing storage: capacity / abi.MapGroupSlots
	// groups of slots plus their control bytes.
	groups groupsReference
}
73
74 func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
75 if capacity < abi.MapGroupSlots {
76 capacity = abi.MapGroupSlots
77 }
78
79 t := &table{
80 index: index,
81 localDepth: localDepth,
82 }
83
84 if capacity > maxTableCapacity {
85 panic("initial table capacity too large")
86 }
87
88
89
90 capacity, overflow := alignUpPow2(capacity)
91 if overflow {
92 panic("rounded-up capacity overflows uint64")
93 }
94
95 t.reset(typ, uint16(capacity))
96
97 return t
98 }
99
100
101
102 func (t *table) reset(typ *abi.MapType, capacity uint16) {
103 groupCount := uint64(capacity) / abi.MapGroupSlots
104 t.groups = newGroups(typ, groupCount)
105 t.capacity = capacity
106 t.growthLeft = t.maxGrowthLeft()
107
108 for i := uint64(0); i <= t.groups.lengthMask; i++ {
109 g := t.groups.group(typ, i)
110 g.ctrls().setEmpty()
111 }
112 }
113
114
115
116 func (t *table) maxGrowthLeft() uint16 {
117 if t.capacity == 0 {
118
119
120 panic("table must have positive capacity")
121 } else if t.capacity <= abi.MapGroupSlots {
122
123
124
125
126
127
128 return t.capacity - 1
129 } else {
130 if t.capacity > math.MaxUint16/maxAvgGroupLoad {
131 panic("overflow")
132 }
133 return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
134 }
135
136 }
137
// Used returns the number of key/elem pairs currently stored in the table.
func (t *table) Used() uint64 {
	return uint64(t.used)
}
141
142
143
// Get performs a lookup of the key that key points to. It returns a
// pointer to the stored element, and false if the key is not present.
func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	// The hash is computed here from the map-wide seed; the other
	// entry points (getWithKey, PutSlot, Delete) take it precomputed.
	hash := typ.Hasher(key, m.seed)
	_, elem, ok := t.getWithKey(typ, hash, key)
	return elem, ok
}
155
156
157
158
159
160
161
162
163
164
// getWithKey performs a lookup of key, returning pointers to the stored
// key and element, and whether the key was present.
func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	// Probe groups in the order given by probeSeq, starting at the
	// group selected by the high hash bits (h1). Within each group,
	// the control bytes are matched against the low hash bits (h2) as
	// a cheap filter before the full key comparison. The probe stops
	// at the first group containing an empty slot: inserts never place
	// a key past such a group, so the key cannot be further along.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Key is stored indirectly; follow the pointer.
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotKey, slotElem, true
			}
			// h2 collision without key equality; try the next match.
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Group contains an empty slot: the key is not in the
			// table.
			return nil, nil, false
		}
	}
}
225
// getWithoutKey is identical to getWithKey except that it returns only
// the element pointer, not the stored key. See getWithKey for the probe
// strategy.
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// An empty slot terminates the probe: key absent.
			return nil, false
		}
	}
}
259
260
261
262
263
264
265
266
// PutSlot returns a pointer to the element slot for key, inserting the
// key if it is not already present. It returns ok=false when the table
// was full: in that case the table has been rehashed (grown or split)
// and the caller must retry the insert against the new table(s).
func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	// First deleted (tombstone) slot encountered along the probe
	// sequence. If the key turns out not to be present, inserting into
	// this slot instead of the terminating empty slot keeps future
	// probe chains shorter and reuses dead capacity.
	var firstDeletedGroup groupReference
	var firstDeletedSlot uintptr

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		// Look for an existing entry with this key among the slots
		// whose control byte matches h2.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				// Key already present. Some key types need the
				// stored key refreshed on overwrite (equal keys can
				// have distinct representations).
				if typ.NeedKeyUpdate() {
					typedmemmove(typ.Key, slotKey, key)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}

				t.checkInvariants(typ, m)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		// No match in this group. If it has neither empty nor deleted
		// slots, the key may still be further along: keep probing.
		match = g.ctrls().matchEmptyOrDeleted()
		if match == 0 {
			continue
		}
		i := match.first()
		if g.ctrls().get(i) == ctrlDeleted {
			// Tombstone: remember the first one seen but keep
			// probing, since the key could still exist later in the
			// sequence.
			if firstDeletedGroup.data == nil {
				firstDeletedGroup = g
				firstDeletedSlot = i
			}
			continue
		}

		// Empty slot reached: the key is definitely absent. Prefer
		// reusing the earlier tombstone; reusing a deleted slot does
		// not consume growth budget, so pre-increment growthLeft to
		// cancel the decrement below.
		if firstDeletedGroup.data != nil {
			g = firstDeletedGroup
			i = firstDeletedSlot
			t.growthLeft++ // will be decremented below to be a no-op.
		}

		// Out of growth budget: try to reclaim tombstones before
		// resorting to a rehash.
		if t.growthLeft == 0 {
			t.pruneTombstones(typ, m)
		}

		// Insert if the budget allows.
		if t.growthLeft > 0 {
			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Allocate backing storage for an indirect key.
				kmem := newobject(typ.Key)
				*(*unsafe.Pointer)(slotKey) = kmem
				slotKey = kmem
			}
			typedmemmove(typ.Key, slotKey, key)

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Allocate backing storage for an indirect element.
				emem := newobject(typ.Elem)
				*(*unsafe.Pointer)(slotElem) = emem
				slotElem = emem
			}

			g.ctrls().set(i, ctrl(h2Hash))
			t.growthLeft--
			t.used++
			m.used++

			t.checkInvariants(typ, m)
			return slotElem, true
		}

		// Table full: rehash and have the caller retry.
		t.rehash(typ, m)
		return nil, false
	}
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
// uncheckedPutSlot inserts an entry that is known not to be in the table,
// writing it into the first empty-or-deleted slot on the probe sequence
// without any key comparisons. Used while rehashing (grow/split), where
// every key moved from the old table is unique and growthLeft has been
// sized to hold them all. Updates t.used only; callers maintain the
// map-level count themselves.
//
// For indirect keys/elems the caller's pointers are stored directly —
// the referents already live in their own heap allocations.
func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
	if t.growthLeft == 0 {
		panic("invariant failed: growthLeft is unexpectedly 0")
	}

	// Since the key cannot already be present, take the first
	// empty-or-deleted slot found.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchEmptyOrDeleted()
		if match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Store the existing indirect key pointer as-is.
				*(*unsafe.Pointer)(slotKey) = key
			} else {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				*(*unsafe.Pointer)(slotElem) = elem
			} else {
				typedmemmove(typ.Elem, slotElem, elem)
			}

			t.growthLeft--
			t.used++
			g.ctrls().set(i, ctrl(h2(hash)))
			return
		}
	}
}
421
422
// Delete removes the entry with the given key, if present. It reports
// whether the deletion left a tombstone (ctrlDeleted) behind.
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot pointer itself; for indirect keys it is
			// the pointer cell that must be cleared, not the referent.
			origSlotKey := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}

			if typ.Key.Equal(key, slotKey) {
				t.used--
				m.used--

				if typ.IndirectKey() {
					// Clearing the pointer cell releases the
					// indirectly-stored key to the GC.
					*(*unsafe.Pointer)(origSlotKey) = nil
				} else if typ.Key.Pointers() {
					// Only clear the key if it contains pointers the
					// GC must stop tracing; pointer-free data can be
					// left behind harmlessly.
					typedmemclr(typ.Key, slotKey)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					// Release the indirectly-stored element.
					*(*unsafe.Pointer)(slotElem) = nil
				} else {
					// The element is cleared unconditionally, even
					// for pointer-free types — NOTE(review):
					// presumably because element slot pointers are
					// handed out (see PutSlot) and must not retain
					// stale values; confirm.
					typedmemclr(typ.Elem, slotElem)
				}

				// If this group still has an empty slot, no probe
				// sequence can ever have continued past it (lookups
				// stop at the first group with an empty slot), so the
				// slot can become empty again and its growth budget
				// is reclaimed. Otherwise it must become a tombstone
				// so existing probe chains stay unbroken.
				var tombstone bool
				if g.ctrls().matchEmpty() != 0 {
					g.ctrls().set(i, ctrlEmpty)
					t.growthLeft++
				} else {
					g.ctrls().set(i, ctrlDeleted)
					tombstone = true
				}

				t.checkInvariants(typ, m)
				return tombstone
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot reached: the key was never in the table.
			return false
		}
	}
}
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
// pruneTombstones tries to convert deleted (tombstone) control bytes back
// to empty, recovering insert capacity without a rehash. A tombstone may
// only become empty if no live entry's probe sequence depends on it to
// keep searching. Called from PutSlot when growthLeft hits zero; bails
// out early unless tombstones are at least 10% of capacity and at least
// 10% is actually reclaimable.
func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
	if t.tombstones()*10 < t.capacity {
		// Too few tombstones to be worth a full scan.
		return
	}

	// One bit per group: set when the group's tombstones must be
	// preserved (or when pruning it would be pointless).
	var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32

	// For every live entry, walk its probe sequence from its home
	// group up to (but excluding) the group it occupies. Any group on
	// that path containing empty-or-deleted slots is load-bearing: if
	// its tombstones became empty, lookups for this entry would stop
	// there and miss it.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		match := g.ctrls().matchFull()
		for match != 0 {
			j := match.first()
			match = match.removeFirst()
			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}
			if !typ.Key.Equal(key, key) {
				// A key not equal to itself (e.g. NaN) can never be
				// found by lookup, so it has no probe chain worth
				// protecting.
				continue
			}

			// Rehash the key to recover its home group and walk the
			// sequence to the current group.
			hash := typ.Hasher(key, m.seed)
			for seq := makeProbeSeq(h1(hash), t.groups.lengthMask); ; seq = seq.next() {
				if seq.offset == i {
					break
				}
				g := t.groups.group(typ, seq.offset)
				m := g.ctrls().matchEmptyOrDeleted()
				if m != 0 {
					// Intermediate group with non-full slots: must
					// not be pruned.
					needed[seq.offset/32] |= 1 << (seq.offset % 32)
				}
			}
		}
		if g.ctrls().matchEmpty() != 0 {
			// A group that already has an empty slot gains nothing
			// from pruning; mark it so it is skipped below.
			needed[i/32] |= 1 << (i % 32)
		}
	}

	// Count the reclaimable slots. Unmarked groups have no empty slots
	// (those were marked above), so matchEmptyOrDeleted counts only
	// tombstones here. Give up if the gain is under 10% of capacity.
	cnt := 0
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		cnt += m.count()
	}
	if cnt*10 < int(t.capacity) {
		return
	}

	// Convert the safe tombstones back to empty and return their
	// capacity to the growth budget.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		for m != 0 {
			k := m.first()
			m = m.removeFirst()
			g.ctrls().set(k, ctrlEmpty)
			t.growthLeft++
		}
	}
}
592
593
594
595
// tombstones returns the number of deleted (tombstone) slots: the portion
// of the load-factor budget that is neither live (used) nor still
// available for inserts (growthLeft).
func (t *table) tombstones() uint16 {
	return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
}
599
600
// Clear deletes all entries from the table, keeping the allocated group
// storage and restoring the full growth budget.
func (t *table) Clear(typ *abi.MapType) {
	mgl := t.maxGrowthLeft()
	if t.used == 0 && t.growthLeft == mgl {
		// Already empty with a fully reset budget; nothing to do.
		return
	}

	// Two clearing strategies: test each group for occupied slots and
	// clear only those, or clear every group unconditionally. The test
	// pays off when occupancy is low (used*4 <= group count; note
	// lengthMask is groupCount-1) or when slots are large enough that
	// an avoided memclr outweighs the check — NOTE(review): the >32
	// byte threshold is presumably an empirical tuning point; confirm.
	fullTest := uint64(t.used)*4 <= t.groups.lengthMask
	if goexperiment.MapSplitGroup {
		if (typ.KeyStride + typ.ElemStride) > 32 {
			// Large combined slot size: always check first.
			fullTest = true
		}
	} else {
		if typ.KeyStride > 32 {
			// Large key size: always check first.
			fullTest = true
		}
	}
	if fullTest {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			if g.ctrls().anyFull() {
				typedmemclr(typ.Group, g.data)
			}
			g.ctrls().setEmpty()
		}
	} else {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			typedmemclr(typ.Group, g.data)
			g.ctrls().setEmpty()
		}
	}
	t.used = 0
	t.growthLeft = mgl
}
648
// Iter is the state of an in-progress map iteration.
type Iter struct {
	key  unsafe.Pointer // current key; nil once iteration is complete
	elem unsafe.Pointer // current element; nil once iteration is complete
	typ  *abi.MapType
	m    *Map

	// Random starting offsets, fixed at Init, that randomize iteration
	// order: entryOffset rotates the entry order within a table,
	// dirOffset rotates the order directory entries are visited.
	entryOffset uint64
	dirOffset   uint64

	// Snapshot of m.clearSeq at Init, used to detect that the map was
	// cleared mid-iteration (see grownKeyElem).
	clearSeq uint64

	// Snapshot of m.globalDepth; when the directory grows during
	// iteration, Next rescales dirIdx/dirOffset to the new size.
	globalDepth uint8

	// Current position in the directory. -1 means a small
	// (directory-less) map whose single group is iterated directly.
	dirIdx int

	// Table at the current dirIdx, or nil before it has been loaded.
	tab *table

	// Group currently being iterated within tab (or the small map's
	// only group).
	group groupReference

	// Index of the next entry to consider within the current table
	// (combined with entryOffset to get the actual slot).
	entryIdx uint64
}
684
685
686 func (it *Iter) Init(typ *abi.MapType, m *Map) {
687 it.typ = typ
688
689 if m == nil || m.used == 0 {
690 return
691 }
692
693 dirIdx := 0
694 var groupSmall groupReference
695 if m.dirLen <= 0 {
696
697 dirIdx = -1
698 groupSmall.data = m.dirPtr
699 }
700
701 it.m = m
702 it.entryOffset = rand()
703 it.dirOffset = rand()
704 it.globalDepth = m.globalDepth
705 it.dirIdx = dirIdx
706 it.group = groupSmall
707 it.clearSeq = m.clearSeq
708 }
709
// Initialized reports whether Init has been called on the iterator.
func (it *Iter) Initialized() bool {
	return it.typ != nil
}
713
714
// Map returns the map this iterator was initialized with (nil if the map
// was nil or empty at Init).
func (it *Iter) Map() *Map {
	return it.m
}
718
719
720
721
// Key returns a pointer to the current key, or nil when iteration is
// complete (or Next has not yet been called).
func (it *Iter) Key() unsafe.Pointer {
	return it.key
}
725
726
727
728
729
// Elem returns a pointer to the current element, or nil when iteration is
// complete (or Next has not yet been called).
func (it *Iter) Elem() unsafe.Pointer {
	return it.elem
}
733
// nextDirIdx advances the iterator past the current table to the next
// distinct table in the directory, resetting the per-table state.
func (it *Iter) nextDirIdx() {
	// A table with local depth d occupies 1 << (globalDepth - d)
	// consecutive directory entries, all referring to the same table;
	// skip them all so each table is visited exactly once.
	entries := 1 << (it.m.globalDepth - it.tab.localDepth)
	it.dirIdx += entries
	it.tab = nil
	it.group = groupReference{}
	it.entryIdx = 0
}
767
768
769
// grownKeyElem re-looks up key in the live map after the iterator has
// noticed that the table it is walking was replaced by growth. It returns
// the key and element to yield, or ok=false if the entry has since been
// deleted and must be skipped.
func (it *Iter) grownKeyElem(key unsafe.Pointer, slotIdx uintptr) (unsafe.Pointer, unsafe.Pointer, bool) {
	newKey, newElem, ok := it.m.getWithKey(it.typ, key)
	if !ok {
		// Lookup failed. Either the entry was deleted, or the key is
		// one that never compares equal to itself (e.g. NaN) and so
		// can never be found by lookup at all. In the latter case —
		// provided the map has not been cleared since iteration began
		// (clearSeq unchanged) — the key is still live, so yield it
		// with the element from the old (stale) slot.
		if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
			elem := it.group.elem(it.typ, slotIdx)
			if it.typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}
			return key, elem, true
		}

		// Deleted (or cleared): skip this entry.
		return nil, nil, false
	}

	return newKey, newElem, true
}
810
811
812
813
814
815
816
817
// Next advances the iterator, setting it.key/it.elem to the next entry.
// A nil it.key marks the end of iteration. When the map (or a table) has
// grown since iteration began, entries are re-looked up in the live map
// so that deletions performed mid-iteration are respected.
func (it *Iter) Next() {
	if it.m == nil {
		// Map was empty or nil at Init: nothing to yield.
		it.key = nil
		it.elem = nil
		return
	}

	if it.m.writing != 0 {
		fatal("concurrent map iteration and map write")
		return
	}

	if it.dirIdx < 0 {
		// Small map (no directory): iterate the single group's slots,
		// rotated by the random entryOffset.
		for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
			k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots

			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
				// Slot holds no entry (empty or deleted).
				continue
			}

			key := it.group.key(it.typ, k)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// If the map acquired a directory since Init, this small
			// group is stale: re-look up the key so deletions are
			// respected and the live element is returned.
			grown := it.m.dirLen > 0
			var elem unsafe.Pointer
			if grown {
				var ok bool
				newKey, newElem, ok := it.m.getWithKey(it.typ, key)
				if !ok {
					// Lookup failed. A key that is not equal to
					// itself (e.g. NaN) cannot be found by lookup;
					// if the map has not been cleared it is still
					// live, so use the stale slot's element.
					// Otherwise the entry was deleted: skip it.
					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
						elem = it.group.elem(it.typ, k)
						if it.typ.IndirectElem() {
							elem = *((*unsafe.Pointer)(elem))
						}
					} else {
						continue
					}
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, k)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}
		it.key = nil
		it.elem = nil
		return
	}

	if it.globalDepth != it.m.globalDepth {
		// The directory doubled (possibly several times) since the
		// last call: each old directory entry now corresponds to
		// 2^orders entries, so scale the saved position and offset to
		// keep iterating from the equivalent place.
		orders := it.m.globalDepth - it.globalDepth
		it.dirIdx <<= orders
		it.dirOffset <<= orders

		// Record the new depth so rescaling happens once per growth.
		it.globalDepth = it.m.globalDepth
	}

	// Walk the directory one table at a time (nextDirIdx skips the
	// duplicate entries that refer to the same table).
	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() {
		// Load the table for this (randomized) directory position if
		// we are not resuming one.
		if it.tab == nil {
			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
			newTab := it.m.directoryAt(uintptr(dirIdx))
			if newTab.index != dirIdx {
				// A table spans consecutive directory entries, and
				// table.index names the first. Normalize to the
				// first entry so each table is iterated exactly
				// once, adjusting dirOffset to keep the mapping
				// consistent on later calls.
				diff := dirIdx - newTab.index
				it.dirOffset -= uint64(diff)
				dirIdx = newTab.index
			}
			it.tab = newTab
		}

		// entryIdx counts how many entries of this table have been
		// considered; past capacity-1 the table is done.
		entryMask := uint64(it.tab.capacity) - 1
		if it.entryIdx > entryMask {
			// Continue to next table.
			continue
		}

		// Fast path: check just the single next slot; the loop below
		// handles scanning past unused slots in bulk.
		entryIdx := (it.entryIdx + it.entryOffset) & entryMask
		slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
		if slotIdx == 0 || it.group.data == nil {
			// Crossed into a new group (or resuming with no cached
			// group): load it.
			groupIdx := entryIdx >> abi.MapGroupSlotsBits
			it.group = it.tab.groups.group(it.typ, groupIdx)
		}

		if (it.group.ctrls().get(slotIdx) & ctrlEmpty) == 0 {
			// Slot is full: yield it, re-looking the key up first if
			// this table has been replaced by growth (index == -1).
			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Entry deleted since the table was replaced;
					// fall through to the scanning loop.
					goto next
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}

	next:
		it.entryIdx++

		// Slow path: scan forward using the group's full-slot bitmap
		// (matchFull) so runs of empty/deleted slots are skipped in
		// bulk. groupMatch caches the remaining full slots of the
		// current group.
		var groupMatch bitset
		for it.entryIdx <= entryMask {
			entryIdx := (it.entryIdx + it.entryOffset) & entryMask
			slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))

			if slotIdx == 0 || it.group.data == nil {
				// Crossed into a new group: load it.
				groupIdx := entryIdx >> abi.MapGroupSlotsBits
				it.group = it.tab.groups.group(it.typ, groupIdx)
			}

			if groupMatch == 0 {
				groupMatch = it.group.ctrls().matchFull()

				if slotIdx != 0 {
					// Resuming mid-group: drop the slots already
					// visited earlier in the rotated order.
					groupMatch = groupMatch.removeBelow(slotIdx)
				}

				if groupMatch == 0 {
					// No full slots left in this group: jump to the
					// next group boundary.
					it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
					continue
				}

				// Advance directly to the first full slot.
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
				if it.entryIdx > entryMask {
					// The next full slot is past this table's range.
					continue
				}
				entryIdx += uint64(i - slotIdx)
				slotIdx = i
			}

			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// As on the fast path: a replaced table requires a
			// re-lookup in the live map.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Deleted; advance to the next full slot in the
					// cached bitmap, or to the next group.
					groupMatch = groupMatch.removeFirst()
					if groupMatch == 0 {
						it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
						continue
					}

					i := groupMatch.first()
					it.entryIdx += uint64(i - slotIdx)
					continue
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			// Position entryIdx at the next full slot (or next group)
			// before yielding, so the next call resumes cheaply.
			groupMatch = groupMatch.removeFirst()
			if groupMatch == 0 {
				it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
			} else {
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
			}

			it.key = key
			it.elem = elem
			return
		}

		// Table exhausted; the loop post-statement moves to the next.
	}

	it.key = nil
	it.elem = nil
	return
}
1149
1150
1151
1152
1153 func (t *table) rehash(typ *abi.MapType, m *Map) {
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169 newCapacity := 2 * t.capacity
1170 if newCapacity <= maxTableCapacity {
1171 t.grow(typ, m, newCapacity)
1172 return
1173 }
1174
1175 t.split(typ, m)
1176 }
1177
1178
1179 func localDepthMask(localDepth uint8) uintptr {
1180 if !Use64BitHash {
1181 return uintptr(1) << (32 - localDepth)
1182 }
1183 return uintptr(1) << (64 - localDepth)
1184 }
1185
1186
// split splits the table into two tables at localDepth+1, partitioning
// the entries by the next hash bit, and installs the pair in the map
// directory in place of this table.
func (t *table) split(typ *abi.MapType, m *Map) {
	localDepth := t.localDepth
	localDepth++

	// Both halves start at max capacity; index -1 marks them as not
	// yet installed in the directory.
	left := newTable(typ, maxTableCapacity, -1, localDepth)
	right := newTable(typ, maxTableCapacity, -1, localDepth)

	// The hash bit that routes an entry left (0) or right (1) at the
	// new depth.
	mask := localDepthMask(localDepth)

	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		for j := uintptr(0); j < abi.MapGroupSlots; j++ {
			if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
				// Slot holds no entry.
				continue
			}

			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			elem := g.elem(typ, j)
			if typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}

			hash := typ.Hasher(key, m.seed)
			var newTable *table
			if hash&mask == 0 {
				newTable = left
			} else {
				newTable = right
			}
			// Keys are unique in the source table, so the unchecked
			// insert is safe.
			newTable.uncheckedPutSlot(typ, hash, key, elem)
		}
	}

	m.installTableSplit(t, left, right)
	// Mark this table stale: it is no longer in the directory.
	t.index = -1
}
1230
1231
1232
1233
1234
// grow rehashes the table's contents into a fresh table of newCapacity
// slots (same directory index and local depth), then replaces this table
// in the map directory.
func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)

	if t.capacity > 0 {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			for j := uintptr(0); j < abi.MapGroupSlots; j++ {
				if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
					// Slot holds no entry.
					continue
				}

				key := g.key(typ, j)
				if typ.IndirectKey() {
					key = *((*unsafe.Pointer)(key))
				}

				elem := g.elem(typ, j)
				if typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}

				hash := typ.Hasher(key, m.seed)

				// Keys are unique in the source table, so the
				// unchecked insert is safe.
				newTable.uncheckedPutSlot(typ, hash, key, elem)
			}
		}
	}

	newTable.checkInvariants(typ, m)
	m.replaceTable(newTable)
	// Mark the old table stale.
	t.index = -1
}
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
// probeSeq is the state of a probe sequence over a table's groups. The
// step size grows by one on each advance, so successive offsets from the
// start group are the triangular numbers 0, 1, 3, 6, ... taken modulo the
// (power-of-two) group count; such a sequence visits every group exactly
// once before repeating.
type probeSeq struct {
	mask   uint64 // group count minus one (group count is a power of two)
	offset uint64 // index of the current group
	index  uint64 // number of steps taken so far
}

// makeProbeSeq starts a probe sequence at the group selected by hash.
func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
	return probeSeq{
		mask:   mask,
		offset: uint64(hash) & mask,
		index:  0,
	}
}

// next returns the sequence advanced by one (growing) step.
func (s probeSeq) next() probeSeq {
	step := s.index + 1
	return probeSeq{
		mask:   s.mask,
		offset: (s.offset + step) & s.mask,
		index:  step,
	}
}
1301
1302 func (t *table) clone(typ *abi.MapType) *table {
1303
1304 t2 := new(table)
1305 *t2 = *t
1306 t = t2
1307
1308
1309 oldGroups := t.groups
1310 newGroups := newGroups(typ, oldGroups.lengthMask+1)
1311 for i := uint64(0); i <= oldGroups.lengthMask; i++ {
1312 oldGroup := oldGroups.group(typ, i)
1313 newGroup := newGroups.group(typ, i)
1314 cloneGroup(typ, newGroup, oldGroup)
1315 }
1316 t.groups = newGroups
1317
1318 return t
1319 }
1320