// src/runtime/malloc_generated.go
//
// Code generated (one specialized malloc fast path per size class); DO NOT EDIT.
4 package runtime
5
6 import (
7 "internal/goarch"
8 "internal/goexperiment"
9 "internal/runtime/sys"
10 "unsafe"
11 )
12
// mallocgcSmallScanNoHeaderSC1 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 1 (elemsize 8)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 1

	const elemsize = 8

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*8 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// One-word objects on 64-bit: no per-object bitmap write is done
		// here; only the scannable-byte accounting. (This branch is selected
		// statically; in the other generated size classes it is dead code.)
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 8

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
180
// mallocgcSmallScanNoHeaderSC2 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 2 (elemsize 16)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 2

	const elemsize = 16

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 2); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 16

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
348
// mallocgcSmallScanNoHeaderSC3 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 3 (elemsize 24)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 3

	const elemsize = 24

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 3); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 24

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
516
// mallocgcSmallScanNoHeaderSC4 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 4 (elemsize 32)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 4

	const elemsize = 32

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 4); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 32

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
684
// mallocgcSmallScanNoHeaderSC5 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 5 (elemsize 48)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 5

	const elemsize = 48

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 5); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 48

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
852
// mallocgcSmallScanNoHeaderSC6 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 6 (elemsize 64)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 6

	const elemsize = 64

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 6); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 64

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
1020
// mallocgcSmallScanNoHeaderSC7 is the specialized allocation fast path for
// small scannable (pointer-bearing) objects in size class 7 (elemsize 80)
// whose GC pointer bitmap is kept in the span rather than in a per-object
// malloc header. One copy of this function is generated per size class with
// the class constants baked in so the compiler can fold them.
func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during GC mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker that this path may queue a finalizer.
	lockRankMayQueueFinalizer()

	// Debug hooks may satisfy the allocation themselves.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While a GC cycle is active, charge this allocation against the
	// goroutine's assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Per-size-class constants baked in by the generator.
	const sizeclass = 7

	const elemsize = 80

	// Pin the M; mallocing != 0 marks the allocator's critical section.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low span-class bit 0 = "scan" (pointer-bearing) variant.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: try to pop the next free slot from the span's
	// 64-bit allocation-bit cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a 64-slot
			// boundary short of nelems: the cache must be refilled there.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span; may ask
		// us to test the GC trigger once the critical section ends.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead code in this variant (sizeclass == 7); the compiler
		// eliminates it. Kept identical across generated copies.
		c.scanAlloc += 8
	} else {
		// Inlined writeHeapBitsSmall: build the object's pointer mask from
		// the type's GC mask and store it into the span's heap-bits region.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 80

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Element is a single pointer word: every word of the object is
			// a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask for each element of the object.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bits inside the span's heap-bits array:
		// word index i, bit offset j.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split the write across both.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		// Expensive self-check, compiled out by default.
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure the zeroing and heap-bits stores above are visible before the
	// pointer to the object is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Publish the advanced free index for the GC's benefit.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Starting a GC must happen outside the mallocing critical section.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental (goexperiment.RuntimeSecret): record allocations made
		// while the goroutine is in secret mode.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Also charge the size-class rounding waste (elemsize - size) to the
	// assist account.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
1188
1189 func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
1190
1191 if doubleCheckMalloc {
1192 if gcphase == _GCmarktermination {
1193 throw("mallocgc called with gcphase == _GCmarktermination")
1194 }
1195 }
1196
1197 lockRankMayQueueFinalizer()
1198
1199 if debug.malloc {
1200 if x := preMallocgcDebug(size, typ); x != nil {
1201 return x
1202 }
1203 }
1204
1205 if gcBlackenEnabled != 0 {
1206 deductAssistCredit(size)
1207 }
1208
1209 const sizeclass = 8
1210
1211 const elemsize = 96
1212
1213 mp := acquirem()
1214 if doubleCheckMalloc {
1215 doubleCheckSmallScanNoHeader(size, typ, mp)
1216 }
1217 mp.mallocing = 1
1218
1219 checkGCTrigger := false
1220 c := getMCache(mp)
1221 const spc = spanClass(sizeclass<<1) | spanClass(0)
1222 span := c.alloc[spc]
1223
1224 var nextFreeFastResult gclinkptr
1225 if span.allocCache != 0 {
1226 theBit := sys.TrailingZeros64(span.allocCache)
1227 result := span.freeindex + uint16(theBit)
1228 if result < span.nelems {
1229 freeidx := result + 1
1230 if !(freeidx%64 == 0 && freeidx != span.nelems) {
1231 span.allocCache >>= uint(theBit + 1)
1232 span.freeindex = freeidx
1233 span.allocCount++
1234 nextFreeFastResult = gclinkptr(uintptr(result)*
1235 96 +
1236 span.base())
1237 }
1238 }
1239 }
1240 v := nextFreeFastResult
1241 if v == 0 {
1242 v, span, checkGCTrigger = c.nextFree(spc)
1243 }
1244 x := unsafe.Pointer(v)
1245 if span.needzero != 0 {
1246 memclrNoHeapPointers(x, elemsize)
1247 }
1248 if goarch.PtrSize == 8 && sizeclass == 1 {
1249
1250 c.scanAlloc += 8
1251 } else {
1252 dataSize := size
1253 x := uintptr(x)
1254
1255 if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) {
1256 throw("tried to write heap bits, but no heap bits in span")
1257 }
1258
1259 src0 := readUintptr(getGCMask(typ))
1260
1261 const elemsize = 96
1262
1263 var scanSize uintptr
1264 src := src0
1265 if typ.Size_ == goarch.PtrSize {
1266 src = (1 << (dataSize / goarch.PtrSize)) - 1
1267
1268 scanSize = dataSize
1269 } else {
1270
1271 if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
1272 throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
1273 }
1274 scanSize = typ.PtrBytes
1275 for i := typ.Size_; i < dataSize; i += typ.Size_ {
1276 src |= src0 << (i / goarch.PtrSize)
1277 scanSize += typ.Size_
1278 }
1279 }
1280
1281 dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
1282 dst := unsafe.Pointer(dstBase)
1283 o := (x - span.base()) / goarch.PtrSize
1284 i := o / ptrBits
1285 j := o % ptrBits
1286 const bits uintptr = elemsize / goarch.PtrSize
1287
1288 const bitsIsPowerOfTwo = bits&(bits-1) == 0
1289 if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
1290
1291 bits0 := ptrBits - j
1292 bits1 := bits - bits0
1293 dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
1294 dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
1295 *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
1296 *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
1297 } else {
1298
1299 dst := (*uintptr)(add(dst, i*goarch.PtrSize))
1300 *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
1301 }
1302
1303 const doubleCheck = false
1304 if doubleCheck {
1305 writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
1306 }
1307 if doubleCheckHeapSetType {
1308 doubleCheckHeapType(x, dataSize, typ, nil, span)
1309 }
1310 c.scanAlloc += scanSize
1311 }
1312
1313 publicationBarrier()
1314
1315 if writeBarrier.enabled {
1316
1317 gcmarknewobject(span, uintptr(x))
1318 } else {
1319
1320 span.freeIndexForScan = span.freeindex
1321 }
1322
1323 c.nextSample -= int64(elemsize)
1324 if c.nextSample < 0 || MemProfileRate != c.memProfRate {
1325 profilealloc(mp, x, elemsize)
1326 }
1327 mp.mallocing = 0
1328 releasem(mp)
1329
1330 if checkGCTrigger {
1331 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1332 gcStart(t)
1333 }
1334 }
1335 gp := getg()
1336 if goexperiment.RuntimeSecret && gp.secret > 0 {
1337
1338 addSecret(x, size)
1339 }
1340
1341 if valgrindenabled {
1342 valgrindMalloc(x, size)
1343 }
1344
1345 if gcBlackenEnabled != 0 && elemsize != 0 {
1346 if assistG := getg().m.curg; assistG != nil {
1347 assistG.gcAssistBytes -= int64(elemsize - size)
1348 }
1349 }
1350
1351 if debug.malloc {
1352 postMallocgcDebug(x, elemsize, typ)
1353 }
1354 return x
1355 }
1356
// mallocgcSmallScanNoHeaderSC9 is a specialized malloc fast path for small
// objects in size class 9 (112-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 9

	const elemsize = 112

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					112 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 9): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 112

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
1524
// mallocgcSmallScanNoHeaderSC10 is a specialized malloc fast path for small
// objects in size class 10 (128-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 10

	const elemsize = 128

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					128 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 10): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 128

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
1692
// mallocgcSmallScanNoHeaderSC11 is a specialized malloc fast path for small
// objects in size class 11 (144-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 11

	const elemsize = 144

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					144 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 11): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 144

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
1860
// mallocgcSmallScanNoHeaderSC12 is a specialized malloc fast path for small
// objects in size class 12 (160-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 12

	const elemsize = 160

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					160 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 12): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 160

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2028
// mallocgcSmallScanNoHeaderSC13 is a specialized malloc fast path for small
// objects in size class 13 (176-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 13

	const elemsize = 176

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					176 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 13): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 176

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2196
// mallocgcSmallScanNoHeaderSC14 is a specialized malloc fast path for small
// objects in size class 14 (192-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 14

	const elemsize = 192

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					192 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 14): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 192

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2364
// mallocgcSmallScanNoHeaderSC15 is a specialized malloc fast path for small
// objects in size class 15 (208-byte slots) allocated from a "scan" span
// (the spanClass noscan bit is 0), where the GC pointer bitmap is stored in
// the span's heap bits rather than in a malloc header. Appears to be
// generated from the generic small-scan mallocgc template; keep the
// structure in sync with the other SC* variants rather than editing ad hoc.
func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	// Optional malloc debug hook; may return a substitute allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this allocation against the GC assist budget while marking.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Constants specialized for this size class by the generator.
	const sizeclass = 15

	const elemsize = 208

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
	// allocCache, bailing to the slow path when the cache is empty or the
	// next free index lands on a cache-word boundary that needs a refill.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					208 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	// Slow path: refill the allocCache or obtain a fresh span; may request
	// a GC-trigger check (see checkGCTrigger below).
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Zero the slot if the span may hold stale data from a previous use.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Dead here (sizeclass == 15): generator boilerplate for the
		// single-pointer size class, folded away by the compiler.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write: build the object's pointer bitmap from
		// the type's GC mask and store it into the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 208

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {
			// Replicate the single-element mask across each element of
			// an array-of-typ allocation.
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Bitmap straddles two uintptr words: split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Bitmap fits in a single word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure zeroing and heap-bit writes above are visible before the
	// pointer to the new object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {
		// No GC running: just keep the scan-visible free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Requested by c.nextFree when the slow path grew the heap.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: track this allocation as secret-bearing.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the assist budget for the size-class rounding waste.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2532
// mallocgcSmallScanNoHeaderSC16 is a generated, specialized allocation fast
// path for size class 16 (224-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 16

	const elemsize = 224

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					224 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 16); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 224

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2700
// mallocgcSmallScanNoHeaderSC17 is a generated, specialized allocation fast
// path for size class 17 (240-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 17

	const elemsize = 240

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					240 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 17); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 240

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
2868
// mallocgcSmallScanNoHeaderSC18 is a generated, specialized allocation fast
// path for size class 18 (256-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 18

	const elemsize = 256

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					256 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 18); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 256

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3036
// mallocgcSmallScanNoHeaderSC19 is a generated, specialized allocation fast
// path for size class 19 (288-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 19

	const elemsize = 288

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					288 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 19); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 288

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3204
// mallocgcSmallScanNoHeaderSC20 is a generated, specialized allocation fast
// path for size class 20 (320-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 20

	const elemsize = 320

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					320 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 20); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 320

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3372
// mallocgcSmallScanNoHeaderSC21 is a generated, specialized allocation fast
// path for size class 21 (352-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 21

	const elemsize = 352

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					352 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 21); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 352

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3540
// mallocgcSmallScanNoHeaderSC22 is a generated, specialized allocation fast
// path for size class 22 (384-byte elements) holding pointers, on spans that
// keep their heap bits in-span rather than in a malloc header. It is one of a
// family of per-size-class instantiations of the same template; needzero is
// accepted for signature parity with the other variants (zeroing here is
// driven by span.needzero instead). Returns a pointer to the new object.
func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Debug-build invariant: allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may end up queuing a finalizer; tell lock ranking.
	lockRankMayQueueFinalizer()

	// Malloc debug hooks may satisfy (and short-circuit) the allocation.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit up front.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 22

	const elemsize = 384

	// Disable preemption and mark the M as allocating so profilers and
	// stack scans see a consistent state.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable (noscan bit clear) span class for this size class.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the cached
	// 64-slot allocation bitmap, when one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when advancing would require
			// refilling the cache word (64-slot boundary crossing
			// that is not the end of the span).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					384 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly taking a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Statically dead here (sizeclass is 22); retained so every generated
	// variant shares one template and the compiler elides the branch.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
		// publish the object's pointer mask into the span's heap bits.
		// Note the deliberate template shadowing of x and elemsize below.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 384

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Word-sized element type: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's pointer mask across each element
			// of the allocation (e.g. for small arrays).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit range inside the span's bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// All bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroing and heap-bit stores visible before the pointer is.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC: publish freeindex for conservative scanners.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only after releasing the M may we consider starting a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// runtimesecret experiment: track secret-tainted allocations.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop
	// (elemsize - size) that was not accounted at entry.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3708
// mallocgcSmallScanNoHeaderSC23 allocates a small, scannable
// (pointer-containing) object in size class 23 (elemsize 416) whose GC
// pointer mask lives in the span's heap bits rather than in a malloc
// header. Generated specialization of the generic small-scan malloc
// path with sizeclass/elemsize folded into constants; needzero is
// unused here (zeroing is decided by span.needzero).
func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 23

	const elemsize = 416

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable span class: noscan bit (low bit) left clear.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the span's
	// 64-bit allocation cache, if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					416 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the allocation cache or obtain a fresh
		// span; may request a GC-trigger check if the heap grew.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Always false here (sizeclass is const 23); branch retained by
	// the generator from the generic template, where size class 1 is
	// a single pointer word.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heapSetTypeSmallHeader/writeHeapBitsSmall: record
		// the object's pointer/scalar mask in the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		// One-element pointer mask from the type's GC metadata.
		src0 := readUintptr(getGCMask(typ))

		const elemsize = 416

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the one-element mask across the array of
			// elements occupying dataSize bytes.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit positions within the span's
		// heap-bitmap words.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split at the word
			// boundary and merge into each half.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure initialization is visible before the pointer is
	// published to other goroutines or the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: record allocations made while holding secrets.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class round-up tail
	// (elemsize - size); deductAssistCredit covered size itself.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
3876
// mallocgcSmallScanNoHeaderSC24 allocates a small, scannable
// (pointer-containing) object in size class 24 (elemsize 448) whose GC
// pointer mask lives in the span's heap bits rather than in a malloc
// header. Generated specialization of the generic small-scan malloc
// path; needzero is unused (zeroing is decided by span.needzero).
func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 24

	const elemsize = 448

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable span class: noscan bit (low bit) left clear.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the span's
	// 64-bit allocation cache, if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					448 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the allocation cache or obtain a fresh
		// span; may request a GC-trigger check if the heap grew.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Always false here (sizeclass is const 24); branch retained by
	// the generator from the generic template.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heapSetTypeSmallHeader/writeHeapBitsSmall: record
		// the object's pointer/scalar mask in the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		// One-element pointer mask from the type's GC metadata.
		src0 := readUintptr(getGCMask(typ))

		const elemsize = 448

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the one-element mask across the array of
			// elements occupying dataSize bytes.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit positions within the span's
		// heap-bitmap words.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split at the word
			// boundary and merge into each half.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure initialization is visible before the pointer is
	// published to other goroutines or the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: record allocations made while holding secrets.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class round-up tail
	// (elemsize - size); deductAssistCredit covered size itself.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4044
// mallocgcSmallScanNoHeaderSC25 allocates a small, scannable
// (pointer-containing) object in size class 25 (elemsize 480) whose GC
// pointer mask lives in the span's heap bits rather than in a malloc
// header. Generated specialization of the generic small-scan malloc
// path; needzero is unused (zeroing is decided by span.needzero).
func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 25

	const elemsize = 480

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable span class: noscan bit (low bit) left clear.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the span's
	// 64-bit allocation cache, if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					480 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the allocation cache or obtain a fresh
		// span; may request a GC-trigger check if the heap grew.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Always false here (sizeclass is const 25); branch retained by
	// the generator from the generic template.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heapSetTypeSmallHeader/writeHeapBitsSmall: record
		// the object's pointer/scalar mask in the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		// One-element pointer mask from the type's GC metadata.
		src0 := readUintptr(getGCMask(typ))

		const elemsize = 480

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the one-element mask across the array of
			// elements occupying dataSize bytes.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit positions within the span's
		// heap-bitmap words.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split at the word
			// boundary and merge into each half.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure initialization is visible before the pointer is
	// published to other goroutines or the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: record allocations made while holding secrets.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class round-up tail
	// (elemsize - size); deductAssistCredit covered size itself.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4212
// mallocgcSmallScanNoHeaderSC26 allocates a small, scannable
// (pointer-containing) object in size class 26 (elemsize 512) whose GC
// pointer mask lives in the span's heap bits rather than in a malloc
// header. Generated specialization of the generic small-scan malloc
// path; needzero is unused (zeroing is decided by span.needzero).
func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 26

	const elemsize = 512

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Scannable span class: noscan bit (low bit) left clear.
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	// Inlined nextFreeFast: pop the next free slot from the span's
	// 64-bit allocation cache, if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					512 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the allocation cache or obtain a fresh
		// span; may request a GC-trigger check if the heap grew.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Always false here (sizeclass is const 26); branch retained by
	// the generator from the generic template.
	if goarch.PtrSize == 8 && sizeclass == 1 {

		c.scanAlloc += 8
	} else {
		// Inlined heapSetTypeSmallHeader/writeHeapBitsSmall: record
		// the object's pointer/scalar mask in the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		// One-element pointer mask from the type's GC metadata.
		src0 := readUintptr(getGCMask(typ))

		const elemsize = 512

		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: every word is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1

			scanSize = dataSize
		} else {

			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the one-element mask across the array of
			// elements occupying dataSize bytes.
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate this object's bit positions within the span's
		// heap-bitmap words.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// Mask straddles two bitmap words: split at the word
			// boundary and merge into each half.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Mask fits in a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Ensure initialization is visible before the pointer is
	// published to other goroutines or the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Experimental: record allocations made while holding secrets.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class round-up tail
	// (elemsize - size); deductAssistCredit covered size itself.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4380
// mallocgcTinySize1 allocates a pointer-free object of constant size 1
// byte via the tiny allocator, which packs multiple tiny objects into
// a shared 16-byte block tracked by the mcache. Generated
// specialization of the tiny malloc path; needzero is unused (the
// block is explicitly zeroed below).
func mallocgcTinySize1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines holding secrets bypass the tiny allocator and take
	// the small-noscan path instead — presumably so their data is not
	// co-located in a shared block; confirm against addSecret's docs.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const constsize = 1

	const elemsize = 16

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset
	// Align the in-block offset to the object's natural alignment,
	// derived from the low bits of its size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// On 32-bit, keep 12-byte objects 8-aligned so any 64-bit
		// fields stay aligned.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadowed to 0 for the epilogue: the shared block was
		// already accounted for when it was allocated.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Dead code here: elemsize is const 0 in this scope.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: carve a fresh 16-byte block out of the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast over the tiny span's allocation cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte block.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the current tiny block when it has more
	// room left than the old one (or there was none). Disabled under
	// the race detector, which gets no tiny packing.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Ensure the zeroing is visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// NOTE(review): under race the returned pointer is offset to
		// the tail of the block — presumably so distinct objects get
		// distinct addresses for the detector; confirm upstream.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the block round-up (elemsize - size).
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4536
// mallocgcTinySize2 allocates a pointer-free object of constant size 2
// bytes via the tiny allocator, which packs multiple tiny objects into
// a shared 16-byte block tracked by the mcache. Generated
// specialization of the tiny malloc path; needzero is unused (the
// block is explicitly zeroed below).
func mallocgcTinySize2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines holding secrets bypass the tiny allocator and take
	// the small-noscan path instead — presumably so their data is not
	// co-located in a shared block; confirm against addSecret's docs.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const constsize = 2

	const elemsize = 16

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset
	// Align the in-block offset to the object's natural alignment,
	// derived from the low bits of its size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// On 32-bit, keep 12-byte objects 8-aligned so any 64-bit
		// fields stay aligned.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadowed to 0 for the epilogue: the shared block was
		// already accounted for when it was allocated.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Dead code here: elemsize is const 0 in this scope.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: carve a fresh 16-byte block out of the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast over the tiny span's allocation cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte block.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the current tiny block when it has more
	// room left than the old one (or there was none). Disabled under
	// the race detector, which gets no tiny packing.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Ensure the zeroing is visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// NOTE(review): under race the returned pointer is offset to
		// the tail of the block — presumably so distinct objects get
		// distinct addresses for the detector; confirm upstream.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the block round-up (elemsize - size).
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4692
// mallocgcTinySize3 allocates a pointer-free object of constant size 3
// bytes via the tiny allocator, which packs multiple tiny objects into
// a shared 16-byte block tracked by the mcache. Generated
// specialization of the tiny malloc path; needzero is unused (the
// block is explicitly zeroed below).
func mallocgcTinySize3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines holding secrets bypass the tiny allocator and take
	// the small-noscan path instead — presumably so their data is not
	// co-located in a shared block; confirm against addSecret's docs.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const constsize = 3

	const elemsize = 16

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset
	// Align the in-block offset to the object's natural alignment,
	// derived from the low bits of its size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// On 32-bit, keep 12-byte objects 8-aligned so any 64-bit
		// fields stay aligned.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadowed to 0 for the epilogue: the shared block was
		// already accounted for when it was allocated.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Dead code here: elemsize is const 0 in this scope.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: carve a fresh 16-byte block out of the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast over the tiny span's allocation cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte block.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the current tiny block when it has more
	// room left than the old one (or there was none). Disabled under
	// the race detector, which gets no tiny packing.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Ensure the zeroing is visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// NOTE(review): under race the returned pointer is offset to
		// the tail of the block — presumably so distinct objects get
		// distinct addresses for the detector; confirm upstream.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the block round-up (elemsize - size).
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
4848
// mallocgcTinySize4 allocates a pointer-free object of constant size 4
// bytes via the tiny allocator, which packs multiple tiny objects into
// a shared 16-byte block tracked by the mcache. Generated
// specialization of the tiny malloc path; needzero is unused (the
// block is explicitly zeroed below).
func mallocgcTinySize4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines holding secrets bypass the tiny allocator and take
	// the small-noscan path instead — presumably so their data is not
	// co-located in a shared block; confirm against addSecret's docs.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may eventually queue a finalizer; record that
	// for lock-rank checking.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		// Debug hook may satisfy the allocation itself.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const constsize = 4

	const elemsize = 16

	// Pin the M; mp.mallocing guards against reentrant allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset
	// Align the in-block offset to the object's natural alignment,
	// derived from the low bits of its size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// On 32-bit, keep 12-byte objects 8-aligned so any 64-bit
		// fields stay aligned.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadowed to 0 for the epilogue: the shared block was
		// already accounted for when it was allocated.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Dead code here: elemsize is const 0 in this scope.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: carve a fresh 16-byte block out of the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast over the tiny span's allocation cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Decline the fast path when freeindex would land on a
			// 64-slot boundary (the cache word would need refilling).
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte block.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the current tiny block when it has more
	// room left than the old one (or there was none). Disabled under
	// the race detector, which gets no tiny packing.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Ensure the zeroing is visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is running: allocate-black the new object.
		gcmarknewobject(span, uintptr(x))
	} else {

		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// NOTE(review): under race the returned pointer is offset to
		// the tail of the block — presumably so distinct objects get
		// distinct addresses for the detector; confirm upstream.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the block round-up (elemsize - size).
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5004
// mallocgcTinySize5 is a specialized allocation fast path for noscan
// objects of exactly 5 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 5

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5160
// mallocgcTinySize6 is a specialized allocation fast path for noscan
// objects of exactly 6 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 6

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5316
// mallocgcTinySize7 is a specialized allocation fast path for noscan
// objects of exactly 7 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 7

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5472
// mallocgcTinySize8 is a specialized allocation fast path for noscan
// objects of exactly 8 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 8

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5628
// mallocgcTinySize9 is a specialized allocation fast path for noscan
// objects of exactly 9 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 9

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5784
// mallocgcTinySize10 is a specialized allocation fast path for noscan
// objects of exactly 10 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 10

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
5940
// mallocgcTinySize11 is a specialized allocation fast path for noscan
// objects of exactly 11 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 11

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6096
// mallocgcTinySize12 is a specialized allocation fast path for noscan
// objects of exactly 12 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 12

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6252
// mallocgcTinySize13 is a specialized allocation fast path for noscan
// objects of exactly 13 bytes. It first tries to bump-allocate out of the
// mcache's current 16-byte tiny block; on a miss it carves a fresh
// element out of the tiny span class and makes it the new tiny block.
func mallocgcTinySize13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Goroutines carrying secrets bypass the tiny allocator and take the
	// plain 16-byte size-class path. NOTE(review): presumably so secret
	// data never shares a tiny block with unrelated objects — confirm.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Allocation may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the goroutine for this allocation while the GC is running.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time-known request size for this specialization.
	const constsize = 13

	// A fresh tiny block is one 16-byte element of the tiny span class.
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the tiny offset to the natural alignment implied by the size.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// NOTE(review): 12-byte objects on 32-bit get 8-byte alignment,
		// presumably so a leading 64-bit field stays atomically
		// accessible — confirm against the non-generated allocator.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// No new heap memory was obtained, so the shared epilogue below
		// runs with elemsize shadowed to 0.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Statically dead here (elemsize == 0); kept for symmetry
			// with the slow-path epilogue.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span after reserving space for the
	// span's inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot out of the span's
	// 64-bit allocation cache if one is available.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// A freeidx on a 64-slot boundary (other than the very end)
			// needs an allocCache refill; defer to the slow path.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Zero the whole 16-byte element with two word stores.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block when it would have
	// more room left than the current one (or there is none). Disabled
	// under the race detector, which needs each allocation distinct.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroing above visible to other processors before the
	// pointer to the object can be published.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object black immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Otherwise just record the new free index for scanning.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profile sampling charges the full element size.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Give the race detector a distinct address by placing the
		// object at the end of its 16-byte block.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the internal fragmentation (elemsize - size) not covered by
	// the up-front deductAssistCredit call.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6408
// mallocgcTinySize14 is a specialized allocation fast path for tiny
// (pointer-free) objects of exactly 14 bytes. It packs the object into
// the mcache's current 16-byte tiny block when it fits, and otherwise
// carves a fresh 16-byte element out of the tiny span. typ is used only
// by the debug/valgrind hooks on this path; needzero is forwarded only
// when the request is rerouted to the plain size-class allocator.
func mallocgcTinySize14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Secret-carrying goroutines skip the shared tiny allocator and
		// get a dedicated 16-byte element instead.
		// NOTE(review): presumably to avoid co-locating secret bytes
		// with other objects in one tiny block — confirm.
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time request size and the tiny element size backing it.
	const constsize = 14

	const elemsize = 16

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the offset inside the tiny block to the object's natural
	// alignment. For constsize 14, only the 2-byte case fires.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// 12-byte objects on 32-bit keep 8-byte alignment (mirrors the
		// upstream tiny allocator; 64-bit fields need it for atomics).
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadow elemsize with 0: the block's bytes were accounted when
		// the tiny block itself was allocated, so the assist-slack
		// charge and debug hooks below see a zero-sized allocation.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Always skipped here (elemsize == 0): no extra assist debt.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span, net of its inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Tiny blocks are always zeroed; later sub-allocations rely on it.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block if it has more room
	// left than the old one. Under the race detector, tiny blocks are
	// never combined so distinct allocations stay distinguishable.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroed contents visible before the pointer can be
	// published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Race mode: place the object at the end of the element so
		// off-the-end pointer arithmetic is detectable.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6564
// mallocgcTinySize15 is a specialized allocation fast path for tiny
// (pointer-free) objects of exactly 15 bytes. It packs the object into
// the mcache's current 16-byte tiny block when it fits, and otherwise
// carves a fresh 16-byte element out of the tiny span. typ is used only
// by the debug/valgrind hooks on this path; needzero is forwarded only
// when the request is rerouted to the plain size-class allocator.
func mallocgcTinySize15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Secret-carrying goroutines skip the shared tiny allocator and
		// get a dedicated 16-byte element instead.
		// NOTE(review): presumably to avoid co-locating secret bytes
		// with other objects in one tiny block — confirm.
		return mallocgcSmallNoScanSC2(size, typ, needzero)
	}

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Compile-time request size and the tiny element size backing it.
	const constsize = 15

	const elemsize = 16

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1

	c := getMCache(mp)
	off := c.tinyoffset

	// Align the offset inside the tiny block to the object's natural
	// alignment. For constsize 15 (odd) no case fires; off is used as-is.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// 12-byte objects on 32-bit keep 8-byte alignment (mirrors the
		// upstream tiny allocator; 64-bit fields need it for atomics).
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// Fast path: the object fits in the current tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Shadow elemsize with 0: the block's bytes were accounted when
		// the tiny block itself was allocated, so the assist-slack
		// charge and debug hooks below see a zero-sized allocation.
		const elemsize = 0
		{

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Always skipped here (elemsize == 0): no extra assist debt.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Slow path: allocate a fresh 16-byte element from the tiny span.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]

	// Element count of an 8 KiB tiny span, net of its inline mark bits.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
		16,
	)
	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	// Tiny blocks are always zeroed; later sub-allocations rely on it.
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0

	// Adopt the new block as the cache's tiny block if it has more room
	// left than the old one. Under the race detector, tiny blocks are
	// never combined so distinct allocations stay distinguishable.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {

		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}

	// Make the zeroed contents visible before the pointer can be
	// published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Race mode: place the object at the end of the element so
		// off-the-end pointer arithmetic is detectable.
		x = add(x, elemsize-constsize)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6720
// mallocgcSmallNoScanSC2 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 2 (16-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 2

	const elemsize = 16

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					16 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6856
// mallocgcSmallNoScanSC3 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 3 (24-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 3

	const elemsize = 24

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					24 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
6992
// mallocgcSmallNoScanSC4 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 4 (32-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 4

	const elemsize = 32

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					32 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7128
// mallocgcSmallNoScanSC5 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 5 (48-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 5

	const elemsize = 48

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					48 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7264
// mallocgcSmallNoScanSC6 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 6 (64-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 6

	const elemsize = 64

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					64 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7400
// mallocgcSmallNoScanSC7 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 7 (80-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 7

	const elemsize = 80

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					80 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7536
// mallocgcSmallNoScanSC8 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 8 (96-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 8

	const elemsize = 96

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					96 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7672
// mallocgcSmallNoScanSC9 is a specialized allocation fast path for small
// noscan (pointer-free) objects served by size class 9 (112-byte
// elements). size is the caller-requested byte count; typ is used only
// by debug/valgrind hooks; needzero controls clearing of recycled slots.
func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			// Allocation is forbidden during mark termination.
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This path may queue a finalizer; inform the lock-rank checker.
	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist budget while marking is active.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 9

	const elemsize = 112

	// Disable preemption while this M uses its mcache.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for (sizeclass, noscan=1).
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// freegc reuse path: hand back a previously freed slot cached for
	// this span class, if one is available.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Post-allocation hooks, mirroring the main exit path below.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// Tag memory allocated by a secret-carrying goroutine.
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist debt for the rounding slack (elemsize - size).
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: pop the next free slot from the span's
	// cached 64-slot allocation bitmap.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Crossing a 64-slot boundary requires a cache refill,
			// which only the slow path below can perform.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					112 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache or fetch a fresh span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer
	// can be published to other goroutines.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate-black during GC: mark the new object immediately.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's published free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Tag memory allocated by a secret-carrying goroutine.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist debt for the rounding slack (elemsize - size) that
	// deductAssistCredit did not cover.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7808
// mallocgcSmallNoScanSC10 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 10
// (128-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 10

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 128

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					128 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
7944
// mallocgcSmallNoScanSC11 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 11
// (144-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 11

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 144

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					144 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8080
// mallocgcSmallNoScanSC12 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 12
// (160-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 12

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 160

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					160 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8216
// mallocgcSmallNoScanSC13 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 13
// (176-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 13

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 176

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					176 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8352
// mallocgcSmallNoScanSC14 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 14
// (192-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 14

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 192

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					192 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8488
// mallocgcSmallNoScanSC15 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 15
// (208-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 15

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 208

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					208 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8624
// mallocgcSmallNoScanSC16 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 16
// (224-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 16

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 224

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					224 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8760
// mallocgcSmallNoScanSC17 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 17
// (240-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 17

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 240

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					240 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
8896
// mallocgcSmallNoScanSC18 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 18
// (256-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 18

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 256

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					256 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9032
// mallocgcSmallNoScanSC19 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 19
// (288-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 19

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 288

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					288 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9168
// mallocgcSmallNoScanSC20 is a generated, size-class-specialized fast path
// for allocating pointer-free ("noscan") objects in size class 20
// (320-byte elements). It mirrors the generic small-noscan mallocgc path.
// NOTE(review): the ordering of zeroing, publicationBarrier, and the
// freeindex/freeIndexForScan updates is significant for GC correctness —
// do not rearrange.
func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Tell the lock-rank checker a finalizer may be queued below.
	lockRankMayQueueFinalizer()

	// Debug hook may intercept and satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// While the GC is marking, charge this goroutine assist credit.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 20

	// Element size for this class, repeated as a literal by the generator.
	const elemsize = 320

	// Disable preemption and flag that we are inside the allocator so
	// re-entrant allocation can be detected.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Low bit 1 selects the noscan variant of this span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Freegc reuse path: hand back a previously freed slot cached on the
	// mcache for this span class, bypassing the bitmap scan below.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Common post-allocation epilogue (shared with the path below).
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {

				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge the size/elemsize rounding slop against assist credit.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: scan the 64-bit allocCache for a free slot.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse to cross a 64-slot boundary here so the cache can be
			// refilled on the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					320 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the alloc cache, possibly fetching a new span;
		// may require a GC trigger test afterwards.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Ensure zeroed contents are visible before the pointer is published.
	publicationBarrier()

	if writeBarrier.enabled {
		// During GC, new objects are marked immediately ("allocate black").
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside GC, keep freeIndexForScan in sync so the GC's view of
		// allocated slots stays current.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sampling bookkeeping.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {

		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge the size/elemsize rounding slop against assist credit.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9304
// mallocgcSmallNoScanSC21 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 21 (element size 352
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 21

	const elemsize = 352

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					352 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9440
// mallocgcSmallNoScanSC22 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 22 (element size 384
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 22

	const elemsize = 384

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					384 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9576
// mallocgcSmallNoScanSC23 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 23 (element size 416
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 23

	const elemsize = 416

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					416 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9712
// mallocgcSmallNoScanSC24 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 24 (element size 448
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 24

	const elemsize = 448

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					448 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9848
// mallocgcSmallNoScanSC25 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 25 (element size 480
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 25

	const elemsize = 480

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					480 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
9984
// mallocgcSmallNoScanSC26 is a specialized allocation fast path for
// pointer-free ("noscan") objects of size class 26 (element size 512
// bytes), with the size class and element size baked in as constants.
// size is the requested byte count, typ describes the allocated type
// for the debug hooks, and needzero requests zeroed memory.
func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

	// Allocation is forbidden during mark termination.
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// This allocation may later queue a finalizer; declare the lock ordering now.
	lockRankMayQueueFinalizer()

	// Debug hook may satisfy the allocation itself.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge this goroutine's GC assist credit for the requested size up front;
	// the rounding slop (elemsize - size) is charged in the epilogue below.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 26

	const elemsize = 512

	// Pin to the current M so the mcache cannot be taken away mid-allocation.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// spanClass(1) sets the noscan bit of the span class.
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

	// Reuse path: if the freegc experiment is on and the cache holds an
	// explicitly freed noscan object of this class, hand that back instead
	// of carving a new slot. This branch returns before the profiling and
	// GC-trigger logic below, which apply only to fresh allocations.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {

		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			// Shared post-allocation epilogue.
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				// NOTE(review): presumably records x as secret-bearing
				// memory — confirm addSecret semantics.
				addSecret(x, size)
			}

			// Tell Valgrind about the allocation when running under it.
			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			// Charge assist credit for the size-class rounding slop.
			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}

	}

	// Inlined nextFreeFast: consult the 64-bit allocCache bitmap for the
	// next free slot in the current span without touching the slow path.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// The cache covers 64 slots; at a 64-slot boundary (that is not
			// the span end) it must be refilled, so fall to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*
					512 +
					span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill or replace the span; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

	// Make the (possibly zeroed) contents visible before the pointer can
	// be published to other goroutines or observed by the GC.
	publicationBarrier()

	if writeBarrier.enabled {
		// GC is active: mark the new object so it survives this cycle.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Keep the scanner's view of the free index in sync.
		span.freeIndexForScan = span.freeindex
	}

	// Heap-profiling sample accounting.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// Only now (after releasing the M) is it safe to start a GC.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// NOTE(review): see the reuse branch above — confirm addSecret semantics.
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Charge assist credit for the size-class rounding slop.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
10120
View as plain text