Source file src/runtime/malloc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
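//
// As a purely illustrative aside on the small-object allocation hierarchy
// above (this is not runtime code; the real size-class tables live in
// internal/runtime/gc and the real bitmap scan is nextFreeFast below), the
// "round up to a size class, then scan the span's free bitmap" step might be
// sketched like this, with a hypothetical three-entry class table and
// math/bits imported:
//
//	// toySizeClasses is a made-up stand-in for the real size-class table.
//	var toySizeClasses = []uintptr{8, 16, 32}
//
//	// toyRoundUp returns the smallest toy class that fits size, or 0 to mean
//	// "too big for a small allocation".
//	func toyRoundUp(size uintptr) uintptr {
//		for _, c := range toySizeClasses {
//			if size <= c {
//				return c
//			}
//		}
//		return 0
//	}
//
//	// toyNextFree scans a 64-slot free bitmap (bit i set means slot i is free)
//	// and returns the first free slot, or -1 if the span is full.
//	func toyNextFree(freeMask uint64) int {
//		if i := bits.TrailingZeros64(freeMask); i < 64 {
//			return i
//		}
//		return -1
//	}
//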
82 // 83 // Each arena has an associated heapArena object that stores the 84 // metadata for that arena: the heap bitmap for all words in the arena 85 // and the span map for all pages in the arena. heapArena objects are 86 // themselves allocated off-heap. 87 // 88 // Since arenas are aligned, the address space can be viewed as a 89 // series of arena frames. The arena map (mheap_.arenas) maps from 90 // arena frame number to *heapArena, or nil for parts of the address 91 // space not backed by the Go heap. The arena map is structured as a 92 // two-level array consisting of a "L1" arena map and many "L2" arena 93 // maps; however, since arenas are large, on many architectures, the 94 // arena map consists of a single, large L2 map. 95 // 96 // The arena map covers the entire possible address space, allowing 97 // the Go heap to use any part of the address space. The allocator 98 // attempts to keep arenas contiguous so that large spans (and hence 99 // large objects) can cross arenas. 100 101 package runtime 102 103 import ( 104 "internal/goarch" 105 "internal/goexperiment" 106 "internal/goos" 107 "internal/runtime/atomic" 108 "internal/runtime/gc" 109 "internal/runtime/math" 110 "internal/runtime/sys" 111 "unsafe" 112 ) 113 114 const ( 115 maxTinySize = _TinySize 116 tinySizeClass = _TinySizeClass 117 maxSmallSize = gc.MaxSmallSize 118 pageSize = 1 << gc.PageShift 119 pageMask = pageSize - 1 120 121 // Unused. Left for viewcore. 122 _PageSize = pageSize 123 minSizeForMallocHeader = gc.MinSizeForMallocHeader 124 mallocHeaderSize = gc.MallocHeaderSize 125 126 // _64bit = 1 on 64-bit systems, 0 on 32-bit systems 127 _64bit = 1 << (^uintptr(0) >> 63) / 2 128 129 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. 130 _TinySize = gc.TinySize 131 _TinySizeClass = int8(gc.TinySizeClass) 132 133 _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc 134 135 // Per-P, per order stack segment cache size. 136 _StackCacheSize = 32 * 1024 137 138 // Number of orders that get caching. Order 0 is FixedStack 139 // and each successive order is twice as large. 140 // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks 141 // will be allocated directly. 142 // Since FixedStack is different on different systems, we 143 // must vary NumStackOrders to keep the same maximum cached size. 144 // OS | FixedStack | NumStackOrders 145 // -----------------+------------+--------------- 146 // linux/darwin/bsd | 2KB | 4 147 // windows/32 | 4KB | 3 148 // windows/64 | 8KB | 2 149 // plan9 | 4KB | 3 150 _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9 151 152 // heapAddrBits is the number of bits in a heap address. On 153 // amd64, addresses are sign-extended beyond heapAddrBits. On 154 // other arches, they are zero-extended. 155 // 156 // On most 64-bit platforms, we limit this to 48 bits based on a 157 // combination of hardware and OS limitations. 158 // 159 // amd64 hardware limits addresses to 48 bits, sign-extended 160 // to 64 bits. Addresses where the top 16 bits are not either 161 // all 0 or all 1 are "non-canonical" and invalid. Because of 162 // these "negative" addresses, we offset addresses by 1<<47 163 // (arenaBaseOffset) on amd64 before computing indexes into 164 // the heap arenas index. 
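	//
	// (Illustrative sketch, not the runtime's code: the arena map lookup
	// described above amounts to turning an address into an arena frame
	// number and splitting that number into an L1 and an L2 index. Roughly,
	// and ignoring the amd64 offsetting detail, a hypothetical helper:)
	//
	//	// toyArenaIndexes maps an address to its two arena map indexes. The
	//	// parameters stand in for heapArenaBytes, arenaL1Shift and arenaL2Bits.
	//	func toyArenaIndexes(addr, arenaBytes uintptr, l1Shift, l2Bits uint) (l1, l2 uintptr) {
	//		frame := addr / arenaBytes   // arena frame number
	//		l1 = frame >> l1Shift        // index into the small L1 arena map
	//		l2 = frame & (1<<l2Bits - 1) // index into the chosen L2 arena map
	//		return
	//	}
	//
	// (On platforms where arenaL1Bits is zero, l1 is always 0 and the frame
	// number effectively indexes a single large L2 map, as noted above.)
	//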
In 2017, amd64 hardware added 165 // support for 57 bit addresses; however, currently only Linux 166 // supports this extension and the kernel will never choose an 167 // address above 1<<47 unless mmap is called with a hint 168 // address above 1<<47 (which we never do). 169 // 170 // arm64 hardware (as of ARMv8) limits user addresses to 48 171 // bits, in the range [0, 1<<48). 172 // 173 // ppc64, mips64, and s390x support arbitrary 64 bit addresses 174 // in hardware. On Linux, Go leans on stricter OS limits. Based 175 // on Linux's processor.h, the user address space is limited as 176 // follows on 64-bit architectures: 177 // 178 // Architecture Name Maximum Value (exclusive) 179 // --------------------------------------------------------------------- 180 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) 181 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) 182 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) 183 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) 184 // s390x TASK_SIZE 1<<64 (64 bit addresses) 185 // 186 // These limits may increase over time, but are currently at 187 // most 48 bits except on s390x. On all architectures, Linux 188 // starts placing mmap'd regions at addresses that are 189 // significantly below 48 bits, so even if it's possible to 190 // exceed Go's 48 bit limit, it's extremely unlikely in 191 // practice. 192 // 193 // On 32-bit platforms, we accept the full 32-bit address 194 // space because doing so is cheap. 195 // mips32 only has access to the low 2GB of virtual memory, so 196 // we further limit it to 31 bits. 197 // 198 // On ios/arm64, although 64-bit pointers are presumably 199 // available, pointers are truncated to 33 bits in iOS <14. 200 // Furthermore, only the top 4 GiB of the address space are 201 // actually available to the application. In iOS >=14, more 202 // of the address space is available, and the OS can now 203 // provide addresses outside of those 33 bits. Pick 40 bits 204 // as a reasonable balance between address space usage by the 205 // page allocator, and flexibility for what mmap'd regions 206 // we'll accept for the heap. We can't just move to the full 207 // 48 bits because this uses too much address space for older 208 // iOS versions. 209 // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64 210 // to a 48-bit address space like every other arm64 platform. 211 // 212 // WebAssembly currently has a limit of 4GB linear memory. 213 heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64 214 215 // maxAlloc is the maximum size of an allocation. On 64-bit, 216 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On 217 // 32-bit, however, this is one less than 1<<32 because the 218 // number of bytes in the address space doesn't actually fit 219 // in a uintptr. 
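	//
	// (Worked example, for illustration only: plugging a few targets into the
	// heapAddrBits expression above reproduces the values discussed in its
	// comment.
	//
	//	linux/amd64: _64bit=1, IsWasm=0, IsIos*IsArm64=0 -> 1*1*1*48 + 0 + 0 = 48
	//	ios/arm64:   first term zeroed by (1-IsIos*IsArm64) -> 0 + 0 + 40*1*1 = 40
	//	linux/386:   _64bit=0, IsWasm=0                   -> 0 + 1*(32-0) + 0 = 32
	//	linux/mips:  _64bit=0, IsMips=1                   -> 0 + 1*(32-1) + 0 = 31
	//	js/wasm:     _64bit=1, IsWasm=1                   -> 0 + 1*(32-0) + 0 = 32
	//
	// The same style of evaluation applies to maxAlloc just below: on 64-bit
	// the correction term vanishes, on 32-bit it subtracts 1 so the value
	// fits in a uintptr.)
	//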
220 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 221 222 // The number of bits in a heap address, the size of heap 223 // arenas, and the L1 and L2 arena map sizes are related by 224 // 225 // (1 << addr bits) = arena size * L1 entries * L2 entries 226 // 227 // Currently, we balance these as follows: 228 // 229 // Platform Addr bits Arena size L1 entries L2 entries 230 // -------------- --------- ---------- ---------- ----------- 231 // */64-bit 48 64MB 1 4M (32MB) 232 // windows/64-bit 48 4MB 64 1M (8MB) 233 // ios/arm64 40 4MB 1 256K (2MB) 234 // */32-bit 32 4MB 1 1024 (4KB) 235 // */mips(le) 31 4MB 1 512 (2KB) 236 // wasm 32 512KB 1 8192 (64KB) 237 238 // heapArenaBytes is the size of a heap arena. The heap 239 // consists of mappings of size heapArenaBytes, aligned to 240 // heapArenaBytes. The initial heap mapping is one arena. 241 // 242 // This is currently 64MB on 64-bit non-Windows, 4MB on 243 // 32-bit and on Windows, and 512KB on Wasm. We use smaller 244 // arenas on Windows because all committed memory is charged 245 // to the process, even if it's not touched. Hence, for 246 // processes with small heaps, the mapped arena space needs 247 // to be commensurate. This is particularly important with 248 // the race detector, since it significantly amplifies the 249 // cost of committed memory. We use smaller arenas on Wasm 250 // because some Wasm programs have very small heap, and 251 // everything in the Wasm linear memory is charged. 252 heapArenaBytes = 1 << logHeapArenaBytes 253 254 heapArenaWords = heapArenaBytes / goarch.PtrSize 255 256 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity, 257 // prefer using heapArenaBytes where possible (we need the 258 // constant to compute some other constants). 259 logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (9+10)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64 260 261 // heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs. 262 heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize) 263 264 pagesPerArena = heapArenaBytes / pageSize 265 266 // arenaL1Bits is the number of bits of the arena number 267 // covered by the first level arena map. 268 // 269 // This number should be small, since the first level arena 270 // map requires PtrSize*(1<<arenaL1Bits) of space in the 271 // binary's BSS. It can be zero, in which case the first level 272 // index is effectively unused. There is a performance benefit 273 // to this, since the generated code can be more efficient, 274 // but comes at the cost of having a large L2 mapping. 275 // 276 // We use the L1 map on 64-bit Windows because the arena size 277 // is small, but the address space is still 48 bits, and 278 // there's a high cost to having a large L2. 279 arenaL1Bits = 6 * (_64bit * goos.IsWindows) 280 281 // arenaL2Bits is the number of bits of the arena number 282 // covered by the second level arena index. 283 // 284 // The size of each arena map allocation is proportional to 285 // 1<<arenaL2Bits, so it's important that this not be too 286 // large. 48 bits leads to 32MB arena index allocations, which 287 // is about the practical threshold. 288 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits 289 290 // arenaL1Shift is the number of bits to shift an arena frame 291 // number by to compute an index into the first level arena map. 
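	//
	// (Worked check, for illustration: in log2 form the relation above is
	// heapAddrBits = logHeapArenaBytes + arenaL1Bits + arenaL2Bits. For
	// */64-bit: 48 = 26 (64MB arenas) + 0 (L1 unused) + 22 (4M L2 entries).
	// For windows/64-bit: 48 = 22 (4MB arenas) + 6 (64 L1 entries) + 20
	// (1M L2 entries). That is exactly how arenaL2Bits is defined a few
	// lines above.)
	//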
292 arenaL1Shift = arenaL2Bits 293 294 // arenaBits is the total bits in a combined arena map index. 295 // This is split between the index into the L1 arena map and 296 // the L2 arena map. 297 arenaBits = arenaL1Bits + arenaL2Bits 298 299 // arenaBaseOffset is the pointer value that corresponds to 300 // index 0 in the heap arena map. 301 // 302 // On amd64, the address space is 48 bits, sign extended to 64 303 // bits. This offset lets us handle "negative" addresses (or 304 // high addresses if viewed as unsigned). 305 // 306 // On aix/ppc64, this offset allows to keep the heapAddrBits to 307 // 48. Otherwise, it would be 60 in order to handle mmap addresses 308 // (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this 309 // case, the memory reserved in (s *pageAlloc).init for chunks 310 // is causing important slowdowns. 311 // 312 // On other platforms, the user address space is contiguous 313 // and starts at 0, so no offset is necessary. 314 arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix 315 // A typed version of this constant that will make it into DWARF (for viewcore). 316 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset) 317 318 // Max number of threads to run garbage collection. 319 // 2, 3, and 4 are all plausible maximums depending 320 // on the hardware details of the machine. The garbage 321 // collector scales well to 32 cpus. 322 _MaxGcproc = 32 323 324 // minLegalPointer is the smallest possible legal pointer. 325 // This is the smallest possible architectural page size, 326 // since we assume that the first page is never mapped. 327 // 328 // This should agree with minZeroPage in the compiler. 329 minLegalPointer uintptr = 4096 330 331 // minHeapForMetadataHugePages sets a threshold on when certain kinds of 332 // heap metadata, currently the arenas map L2 entries and page alloc bitmap 333 // mappings, are allowed to be backed by huge pages. If the heap goal ever 334 // exceeds this threshold, then huge pages are enabled. 335 // 336 // These numbers are chosen with the assumption that huge pages are on the 337 // order of a few MiB in size. 338 // 339 // The kind of metadata this applies to has a very low overhead when compared 340 // to address space used, but their constant overheads for small heaps would 341 // be very high if they were to be backed by huge pages (e.g. a few MiB makes 342 // a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB 343 // heap). The benefit of huge pages is also not worth it for small heaps, 344 // because only a very, very small part of the metadata is used for small heaps. 345 // 346 // N.B. If the heap goal exceeds the threshold then shrinks to a very small size 347 // again, then huge pages will still be enabled for this mapping. The reason is that 348 // there's no point unless we're also returning the physical memory for these 349 // metadata mappings back to the OS. That would be quite complex to do in general 350 // as the heap is likely fragmented after a reduction in heap size. 351 minHeapForMetadataHugePages = 1 << 30 352 353 // randomizeHeapBase indicates if the heap base address should be randomized. 354 // See comment in mallocinit for how the randomization is performed. 355 randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform && !raceenabled && !msanenabled && !asanenabled 356 357 // randHeapBasePrefixMask is used to extract the top byte of the randomized 358 // heap base address. 
359 randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8)) 360 ) 361 362 // physPageSize is the size in bytes of the OS's physical pages. 363 // Mapping and unmapping operations must be done at multiples of 364 // physPageSize. 365 // 366 // This must be set by the OS init code (typically in osinit) before 367 // mallocinit. 368 var physPageSize uintptr 369 370 // physHugePageSize is the size in bytes of the OS's default physical huge 371 // page size whose allocation is opaque to the application. It is assumed 372 // and verified to be a power of two. 373 // 374 // If set, this must be set by the OS init code (typically in osinit) before 375 // mallocinit. However, setting it at all is optional, and leaving the default 376 // value is always safe (though potentially less efficient). 377 // 378 // Since physHugePageSize is always assumed to be a power of two, 379 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. 380 // The purpose of physHugePageShift is to avoid doing divisions in 381 // performance critical functions. 382 var ( 383 physHugePageSize uintptr 384 physHugePageShift uint 385 ) 386 387 var ( 388 // heapRandSeed is a random value that is populated in mallocinit if 389 // randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to 390 // randomize the base heap address. 391 heapRandSeed uintptr 392 heapRandSeedBitsRemaining int 393 ) 394 395 func nextHeapRandBits(bits int) uintptr { 396 if bits > heapRandSeedBitsRemaining { 397 throw("not enough heapRandSeed bits remaining") 398 } 399 r := heapRandSeed >> (64 - bits) 400 heapRandSeed <<= bits 401 heapRandSeedBitsRemaining -= bits 402 return r 403 } 404 405 func mallocinit() { 406 if gc.SizeClassToSize[tinySizeClass] != maxTinySize { 407 throw("bad TinySizeClass") 408 } 409 410 if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 { 411 // heapBits expects modular arithmetic on bitmap 412 // addresses to work. 413 throw("heapArenaBitmapWords not a power of 2") 414 } 415 416 // Check physPageSize. 417 if physPageSize == 0 { 418 // The OS init code failed to fetch the physical page size. 419 throw("failed to get system page size") 420 } 421 if physPageSize > maxPhysPageSize { 422 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 423 throw("bad system page size") 424 } 425 if physPageSize < minPhysPageSize { 426 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 427 throw("bad system page size") 428 } 429 if physPageSize&(physPageSize-1) != 0 { 430 print("system page size (", physPageSize, ") must be a power of 2\n") 431 throw("bad system page size") 432 } 433 if physHugePageSize&(physHugePageSize-1) != 0 { 434 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 435 throw("bad system huge page size") 436 } 437 if physHugePageSize > maxPhysHugePageSize { 438 // physHugePageSize is greater than the maximum supported huge page size. 439 // Don't throw here, like in the other cases, since a system configured 440 // in this way isn't wrong, we just don't have the code to support them. 441 // Instead, silently set the huge page size to zero. 442 physHugePageSize = 0 443 } 444 if physHugePageSize != 0 { 445 // Since physHugePageSize is a power of 2, it suffices to increase 446 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 
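		// (Aside, for illustration only and not a change to the code below:
		// since physHugePageSize is a power of two, the loop that follows is
		// just computing log2, so an equivalent non-runtime sketch using
		// math/bits would be
		//
		//	physHugePageShift = uint(bits.TrailingZeros64(uint64(physHugePageSize)))
		//
		// because for a power of two the number of trailing zero bits is its
		// base-2 logarithm.)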
447 for 1<<physHugePageShift != physHugePageSize { 448 physHugePageShift++ 449 } 450 } 451 if pagesPerArena%pagesPerSpanRoot != 0 { 452 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 453 throw("bad pagesPerSpanRoot") 454 } 455 if pagesPerArena%pagesPerReclaimerChunk != 0 { 456 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 457 throw("bad pagesPerReclaimerChunk") 458 } 459 // Check that the minimum size (exclusive) for a malloc header is also 460 // a size class boundary. This is important to making sure checks align 461 // across different parts of the runtime. 462 // 463 // While we're here, also check to make sure all these size classes' 464 // span sizes are one page. Some code relies on this. 465 minSizeForMallocHeaderIsSizeClass := false 466 sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true 467 for i := 0; i < len(gc.SizeClassToSize); i++ { 468 if gc.SizeClassToNPages[i] > 1 { 469 sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false 470 } 471 if gc.MinSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) { 472 minSizeForMallocHeaderIsSizeClass = true 473 break 474 } 475 } 476 if !minSizeForMallocHeaderIsSizeClass { 477 throw("min size of malloc header is not a size class boundary") 478 } 479 if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage { 480 throw("expected all size classes up to min size for malloc header to fit in one-page spans") 481 } 482 // Check that the pointer bitmap for all small sizes without a malloc header 483 // fits in a word. 484 if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize { 485 throw("max pointer/scan bitmap size for headerless objects is too large") 486 } 487 488 if minTagBits > tagBits { 489 throw("tagBits too small") 490 } 491 492 // Initialize the heap. 493 mheap_.init() 494 mcache0 = allocmcache() 495 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas) 496 lockInit(&profInsertLock, lockRankProfInsert) 497 lockInit(&profBlockLock, lockRankProfBlock) 498 lockInit(&profMemActiveLock, lockRankProfMemActive) 499 for i := range profMemFutureLock { 500 lockInit(&profMemFutureLock[i], lockRankProfMemFuture) 501 } 502 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc) 503 504 // Create initial arena growth hints. 505 if isSbrkPlatform { 506 // Don't generate hints on sbrk platforms. We can 507 // only grow the break sequentially. 508 } else if goarch.PtrSize == 8 { 509 // On a 64-bit machine, we pick the following hints 510 // because: 511 // 512 // 1. Starting from the middle of the address space 513 // makes it easier to grow out a contiguous range 514 // without running in to some other mapping. 515 // 516 // 2. This makes Go heap addresses more easily 517 // recognizable when debugging. 518 // 519 // 3. Stack scanning in gccgo is still conservative, 520 // so it's important that addresses be distinguishable 521 // from other data. 522 // 523 // Starting at 0x00c0 means that the valid memory addresses 524 // will begin 0x00c0, 0x00c1, ... 525 // In little-endian, that's c0 00, c1 00, ... None of those are valid 526 // UTF-8 sequences, and they are otherwise as far away from 527 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 528 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors 529 // on OS X during thread allocations. 0x00c0 causes conflicts with 530 // AddressSanitizer which reserves all memory up to 0x0100. 
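		// (Illustrative aside, not runtime code: the UTF-8 claim above is easy
		// to check with the standard library (encoding/binary, unicode/utf8),
		// since 0xc0 and 0xc1 can only start overlong encodings, which UTF-8
		// rejects. The helper name is made up.)
		//
		//	// addrBytesLookLikeText reports whether the little-endian bytes of
		//	// a pointer value form valid UTF-8.
		//	func addrBytesLookLikeText(addr uint64) bool {
		//		var buf [8]byte
		//		binary.LittleEndian.PutUint64(buf[:], addr)
		//		return utf8.Valid(buf[:])
		//	}
		//
		//	// addrBytesLookLikeText(0x00c0<<32) and addrBytesLookLikeText(0x00c1<<32)
		//	// both report false.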
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmaps start at 0x0A00000000000000 for 64-bit
		// processes.
		//
		// Space mapped for user arenas comes immediately after the range
		// originally reserved for the regular heap when race mode is not
		// enabled because user arena chunks can never be used for regular heap
		// allocations and we want to avoid fragmenting the address space.
		//
		// In race mode we have no choice but to just use the same hints because
		// the race detector requires that the heap be mapped contiguously.
		//
		// If randomizeHeapBase is set, we attempt to randomize the base address
		// as much as possible. We do this by generating a random uint64 via
		// bootstrapRand and using its bits to randomize portions of the base
		// address as follows:
		//   * We first generate a random heapArenaBytes-aligned address that
		//     we use for generating the hints.
		//   * On the first call to mheap.grow, we then generate a random
		//     PallocChunkBytes-aligned offset into the mmap'd heap region,
		//     which we use as the base for the heap region.
		//   * We then select a page offset in that PallocChunkBytes region to
		//     start the heap at, and mark all the pages up to that offset as
		//     allocated.
		//
		// Our final randomized "heap base address" becomes the first byte of
		// the first available page returned by the page allocator. This results
		// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
		// bits of entropy.

		var randHeapBase uintptr
		var randHeapBasePrefix byte
		// heapAddrBits is 48 on most platforms, but we only use 47 of those
		// bits in order to provide a good amount of room for the heap to grow
		// contiguously. On amd64, there are 48 bits, but the top bit is sign
		// extended, so we throw away another bit, just to be safe.
		randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
		if randomizeHeapBase {
			// Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
			// bits, using them as the top bits for randHeapBase.
			heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64

			topBits := (randHeapAddrBits - logHeapArenaBytes)
			randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
			randHeapBase = alignUp(randHeapBase, heapArenaBytes)
			randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
		}

		var vmaSize int
		if GOARCH == "riscv64" {
			// Identify which memory layout is in use based on the system
			// stack address, knowing that the bottom half of virtual memory
			// is user space. This should result in 39, 48 or 57. It may be
			// possible to use RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS at some
			// point in the future - for now use the system stack address.
593 vmaSize = sys.Len64(uint64(getg().m.g0.stack.hi)) + 1 594 if raceenabled && vmaSize != 39 && vmaSize != 48 { 595 println("vma size = ", vmaSize) 596 throw("riscv64 vma size is unknown and race mode is enabled") 597 } 598 } 599 600 for i := 0x7f; i >= 0; i-- { 601 var p uintptr 602 switch { 603 case raceenabled && GOARCH == "riscv64" && vmaSize == 39: 604 p = uintptr(i)<<28 | uintptrMask&(0x0013<<28) 605 if p >= uintptrMask&0x000f00000000 { 606 continue 607 } 608 case raceenabled: 609 // The TSAN runtime requires the heap 610 // to be in the range [0x00c000000000, 611 // 0x00e000000000). 612 p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32) 613 if p >= uintptrMask&0x00e000000000 { 614 continue 615 } 616 case randomizeHeapBase: 617 prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8) 618 p = prefix | (randHeapBase & randHeapBasePrefixMask) 619 case GOARCH == "arm64" && GOOS == "ios": 620 p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) 621 case GOARCH == "arm64": 622 p = uintptr(i)<<40 | uintptrMask&(0x0040<<32) 623 case GOARCH == "riscv64" && vmaSize == 39: 624 p = uintptr(i)<<32 | uintptrMask&(0x0013<<28) 625 case GOOS == "aix": 626 if i == 0 { 627 // We don't use addresses directly after 0x0A00000000000000 628 // to avoid collisions with others mmaps done by non-go programs. 629 continue 630 } 631 p = uintptr(i)<<40 | uintptrMask&(0xa0<<52) 632 default: 633 p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32) 634 } 635 // Switch to generating hints for user arenas if we've gone 636 // through about half the hints. In race mode, take only about 637 // a quarter; we don't have very much space to work with. 638 hintList := &mheap_.arenaHints 639 if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) { 640 hintList = &mheap_.userArena.arenaHints 641 } 642 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 643 hint.addr = p 644 hint.next, *hintList = *hintList, hint 645 } 646 } else { 647 // On a 32-bit machine, we're much more concerned 648 // about keeping the usable heap contiguous. 649 // Hence: 650 // 651 // 1. We reserve space for all heapArenas up front so 652 // they don't get interleaved with the heap. They're 653 // ~258MB, so this isn't too bad. (We could reserve a 654 // smaller amount of space up front if this is a 655 // problem.) 656 // 657 // 2. We hint the heap to start right above the end of 658 // the binary so we have the best chance of keeping it 659 // contiguous. 660 // 661 // 3. We try to stake out a reasonably large initial 662 // heap reservation. 663 664 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{}) 665 meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation")) 666 if meta != 0 { 667 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true) 668 } 669 670 // We want to start the arena low, but if we're linked 671 // against C code, it's possible global constructors 672 // have called malloc and adjusted the process' brk. 673 // Query the brk so we can avoid trying to map the 674 // region over it (which will cause the kernel to put 675 // the region somewhere else, likely at a high 676 // address). 677 procBrk := sbrk0() 678 679 // If we ask for the end of the data segment but the 680 // operating system requires a little more space 681 // before we can start allocating, it will give out a 682 // slightly higher pointer. Except QEMU, which is 683 // buggy, as usual: it won't adjust the pointer 684 // upward. So adjust it upward a little bit ourselves: 685 // 1/4 MB to get away from the running binary image. 
686 p := firstmoduledata.end 687 if p < procBrk { 688 p = procBrk 689 } 690 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end { 691 p = mheap_.heapArenaAlloc.end 692 } 693 p = alignUp(p+(256<<10), heapArenaBytes) 694 // Because we're worried about fragmentation on 695 // 32-bit, we try to make a large initial reservation. 696 arenaSizes := []uintptr{ 697 512 << 20, 698 256 << 20, 699 128 << 20, 700 } 701 for _, arenaSize := range arenaSizes { 702 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation") 703 if a != nil { 704 mheap_.arena.init(uintptr(a), size, false) 705 p = mheap_.arena.end // For hint below 706 break 707 } 708 } 709 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 710 hint.addr = p 711 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 712 713 // Place the hint for user arenas just after the large reservation. 714 // 715 // While this potentially competes with the hint above, in practice we probably 716 // aren't going to be getting this far anyway on 32-bit platforms. 717 userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 718 userArenaHint.addr = p 719 userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint 720 } 721 // Initialize the memory limit here because the allocator is going to look at it 722 // but we haven't called gcinit yet and we're definitely going to allocate memory before then. 723 gcController.memoryLimit.Store(math.MaxInt64) 724 } 725 726 // sysAlloc allocates heap arena space for at least n bytes. The 727 // returned pointer is always heapArenaBytes-aligned and backed by 728 // h.arenas metadata. The returned size is always a multiple of 729 // heapArenaBytes. sysAlloc returns nil on failure. 730 // There is no corresponding free function. 731 // 732 // hintList is a list of hint addresses for where to allocate new 733 // heap arenas. It must be non-nil. 734 // 735 // sysAlloc returns a memory region in the Reserved state. This region must 736 // be transitioned to Prepared and then Ready before use. 737 // 738 // arenaList is the list the arena should be added to. 739 // 740 // h must be locked. 741 func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx) (v unsafe.Pointer, size uintptr) { 742 assertLockHeld(&h.lock) 743 744 n = alignUp(n, heapArenaBytes) 745 746 if hintList == &h.arenaHints { 747 // First, try the arena pre-reservation. 748 // Newly-used mappings are considered released. 749 // 750 // Only do this if we're using the regular heap arena hints. 751 // This behavior is only for the heap. 752 v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap") 753 if v != nil { 754 size = n 755 goto mapped 756 } 757 } 758 759 // Try to grow the heap at a hint address. 760 for *hintList != nil { 761 hint := *hintList 762 p := hint.addr 763 if hint.down { 764 p -= n 765 } 766 if p+n < p { 767 // We can't use this, so don't ask. 768 v = nil 769 } else if arenaIndex(p+n-1) >= 1<<arenaBits { 770 // Outside addressable heap. Can't use. 771 v = nil 772 } else { 773 v = sysReserve(unsafe.Pointer(p), n, "heap reservation") 774 } 775 if p == uintptr(v) { 776 // Success. Update the hint. 777 if !hint.down { 778 p += n 779 } 780 hint.addr = p 781 size = n 782 break 783 } 784 // Failed. Discard this hint and try the next. 785 // 786 // TODO: This would be cleaner if sysReserve could be 787 // told to only return the requested address. 
In 788 // particular, this is already how Windows behaves, so 789 // it would simplify things there. 790 if v != nil { 791 sysUnreserve(v, n) 792 } 793 *hintList = hint.next 794 h.arenaHintAlloc.free(unsafe.Pointer(hint)) 795 } 796 797 if size == 0 { 798 if raceenabled { 799 // The race detector assumes the heap lives in 800 // [0x00c000000000, 0x00e000000000), but we 801 // just ran out of hints in this region. Give 802 // a nice failure. 803 throw("too many address space collisions for -race mode") 804 } 805 806 // All of the hints failed, so we'll take any 807 // (sufficiently aligned) address the kernel will give 808 // us. 809 v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap") 810 if v == nil { 811 return nil, 0 812 } 813 814 // Create new hints for extending this region. 815 hint := (*arenaHint)(h.arenaHintAlloc.alloc()) 816 hint.addr, hint.down = uintptr(v), true 817 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 818 hint = (*arenaHint)(h.arenaHintAlloc.alloc()) 819 hint.addr = uintptr(v) + size 820 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 821 } 822 823 // Check for bad pointers or pointers we can't use. 824 { 825 var bad string 826 p := uintptr(v) 827 if p+size < p { 828 bad = "region exceeds uintptr range" 829 } else if arenaIndex(p) >= 1<<arenaBits { 830 bad = "base outside usable address space" 831 } else if arenaIndex(p+size-1) >= 1<<arenaBits { 832 bad = "end outside usable address space" 833 } 834 if bad != "" { 835 // This should be impossible on most architectures, 836 // but it would be really confusing to debug. 837 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n") 838 throw("memory reservation exceeds address space limit") 839 } 840 } 841 842 if uintptr(v)&(heapArenaBytes-1) != 0 { 843 throw("misrounded allocation in sysAlloc") 844 } 845 846 mapped: 847 if valgrindenabled { 848 valgrindCreateMempool(v) 849 valgrindMakeMemNoAccess(v, size) 850 } 851 852 // Create arena metadata. 853 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ { 854 l2 := h.arenas[ri.l1()] 855 if l2 == nil { 856 // Allocate an L2 arena map. 857 // 858 // Use sysAllocOS instead of sysAlloc or persistentalloc because there's no 859 // statistic we can comfortably account for this space in. With this structure, 860 // we rely on demand paging to avoid large overheads, but tracking which memory 861 // is paged in is too expensive. Trying to account for the whole region means 862 // that it will appear like an enormous memory overhead in statistics, even though 863 // it is not. 864 l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index")) 865 if l2 == nil { 866 throw("out of memory allocating heap arena map") 867 } 868 if h.arenasHugePages { 869 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 870 } else { 871 sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 872 } 873 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2)) 874 } 875 876 if l2[ri.l2()] != nil { 877 throw("arena already initialized") 878 } 879 var r *heapArena 880 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata")) 881 if r == nil { 882 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys)) 883 if r == nil { 884 throw("out of memory allocating heap arena metadata") 885 } 886 } 887 888 // Register the arena in allArenas if requested. 
889 if len((*arenaList)) == cap((*arenaList)) { 890 size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize 891 if size == 0 { 892 size = physPageSize 893 } 894 newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys)) 895 if newArray == nil { 896 throw("out of memory allocating allArenas") 897 } 898 oldSlice := (*arenaList) 899 *(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)} 900 copy((*arenaList), oldSlice) 901 // Do not free the old backing array because 902 // there may be concurrent readers. Since we 903 // double the array each time, this can lead 904 // to at most 2x waste. 905 } 906 (*arenaList) = (*arenaList)[:len((*arenaList))+1] 907 (*arenaList)[len((*arenaList))-1] = ri 908 909 // Store atomically just in case an object from the 910 // new heap arena becomes visible before the heap lock 911 // is released (which shouldn't happen, but there's 912 // little downside to this). 913 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r)) 914 } 915 916 // Tell the race detector about the new heap memory. 917 if raceenabled { 918 racemapshadow(v, size) 919 } 920 921 return 922 } 923 924 // enableMetadataHugePages enables huge pages for various sources of heap metadata. 925 // 926 // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant 927 // time, but may take time proportional to the size of the mapped heap beyond that. 928 // 929 // This function is idempotent. 930 // 931 // The heap lock must not be held over this operation, since it will briefly acquire 932 // the heap lock. 933 // 934 // Must be called on the system stack because it acquires the heap lock. 935 // 936 //go:systemstack 937 func (h *mheap) enableMetadataHugePages() { 938 // Enable huge pages for page structure. 939 h.pages.enableChunkHugePages() 940 941 // Grab the lock and set arenasHugePages if it's not. 942 // 943 // Once arenasHugePages is set, all new L2 entries will be eligible for 944 // huge pages. We'll set all the old entries after we release the lock. 945 lock(&h.lock) 946 if h.arenasHugePages { 947 unlock(&h.lock) 948 return 949 } 950 h.arenasHugePages = true 951 unlock(&h.lock) 952 953 // N.B. The arenas L1 map is quite small on all platforms, so it's fine to 954 // just iterate over the whole thing. 955 for i := range h.arenas { 956 l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i]))) 957 if l2 == nil { 958 continue 959 } 960 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 961 } 962 } 963 964 // base address for all 0-byte allocations 965 var zerobase uintptr 966 967 // nextFreeFast returns the next free object if one is quickly available. 968 // Otherwise it returns 0. 969 func nextFreeFast(s *mspan) gclinkptr { 970 theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? 971 if theBit < 64 { 972 result := s.freeindex + uint16(theBit) 973 if result < s.nelems { 974 freeidx := result + 1 975 if freeidx%64 == 0 && freeidx != s.nelems { 976 return 0 977 } 978 s.allocCache >>= uint(theBit + 1) 979 s.freeindex = freeidx 980 s.allocCount++ 981 return gclinkptr(uintptr(result)*s.elemsize + s.base()) 982 } 983 } 984 return 0 985 } 986 987 // nextFree returns the next free object from the cached span if one is available. 
988 // Otherwise it refills the cache with a span with an available object and 989 // returns that object along with a flag indicating that this was a heavy 990 // weight allocation. If it is a heavy weight allocation the caller must 991 // determine whether a new GC cycle needs to be started or if the GC is active 992 // whether this goroutine needs to assist the GC. 993 // 994 // Must run in a non-preemptible context since otherwise the owner of 995 // c could change. 996 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) { 997 s = c.alloc[spc] 998 checkGCTrigger = false 999 freeIndex := s.nextFreeIndex() 1000 if freeIndex == s.nelems { 1001 // The span is full. 1002 if s.allocCount != s.nelems { 1003 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 1004 throw("s.allocCount != s.nelems && freeIndex == s.nelems") 1005 } 1006 c.refill(spc) 1007 checkGCTrigger = true 1008 s = c.alloc[spc] 1009 1010 freeIndex = s.nextFreeIndex() 1011 } 1012 1013 if freeIndex >= s.nelems { 1014 throw("freeIndex is not valid") 1015 } 1016 1017 v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base()) 1018 s.allocCount++ 1019 if s.allocCount > s.nelems { 1020 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 1021 throw("s.allocCount > s.nelems") 1022 } 1023 return 1024 } 1025 1026 // doubleCheckMalloc enables a bunch of extra checks to malloc to double-check 1027 // that various invariants are upheld. 1028 // 1029 // We might consider turning these on by default; many of them previously were. 1030 // They account for a few % of mallocgc's cost though, which does matter somewhat 1031 // at scale. (When testing changes to malloc, consider enabling this, and also 1032 // some function-local 'doubleCheck' consts such as in mbitmap.go currently.) 1033 const doubleCheckMalloc = false 1034 1035 // sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized 1036 // mallocgc implementation: the experiment must be enabled, and none of the sanitizers should 1037 // be enabled. The tables used to select the size-specialized malloc function do not compile 1038 // properly on plan9, so size-specialized malloc is also disabled on plan9. 1039 const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled 1040 1041 // runtimeFreegcEnabled is the set of conditions where we enable the runtime.freegc 1042 // implementation and the corresponding allocation-related changes: the experiment must be 1043 // enabled, and none of the memory sanitizers should be enabled. We allow the race detector, 1044 // in contrast to sizeSpecializedMallocEnabled. 1045 // TODO(thepudds): it would be nice to check Valgrind integration, though there are some hints 1046 // there might not be any canned tests in tree for Go's integration with Valgrind. 1047 const runtimeFreegcEnabled = goexperiment.RuntimeFreegc && !asanenabled && !msanenabled && !valgrindenabled 1048 1049 // Allocate an object of size bytes. 1050 // Small objects are allocated from the per-P cache's free lists. 1051 // Large objects (> 32 kB) are allocated straight from the heap. 1052 // 1053 // mallocgc should be an internal detail, 1054 // but widely used packages access it using linkname. 
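//
// (For illustration only: a package that does this typically pulls the symbol
// in with a declaration along the following lines, together with an empty .s
// file in the package so the compiler accepts the body-less declaration, and
// whatever mirror of *_type it uses in place of unsafe.Pointer. This is the
// pattern that go.dev/issue/67401 below is about.)
//
//	import "unsafe" // required for //go:linkname
//
//	//go:linkname mallocgc runtime.mallocgc
//	func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer
//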
1055 // Notable members of the hall of shame include: 1056 // - github.com/bytedance/gopkg 1057 // - github.com/bytedance/sonic 1058 // - github.com/cloudwego/frugal 1059 // - github.com/cockroachdb/cockroach 1060 // - github.com/cockroachdb/pebble 1061 // - github.com/ugorji/go/codec 1062 // 1063 // Do not remove or change the type signature. 1064 // See go.dev/issue/67401. 1065 // 1066 //go:linkname mallocgc 1067 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 1068 if doubleCheckMalloc { 1069 if gcphase == _GCmarktermination { 1070 throw("mallocgc called with gcphase == _GCmarktermination") 1071 } 1072 } 1073 1074 // Short-circuit zero-sized allocation requests. 1075 if size == 0 { 1076 return unsafe.Pointer(&zerobase) 1077 } 1078 1079 if sizeSpecializedMallocEnabled && heapBitsInSpan(size) { 1080 if typ == nil || !typ.Pointers() { 1081 return mallocNoScanTable[size](size, typ, needzero) 1082 } else { 1083 if !needzero { 1084 throw("objects with pointers must be zeroed") 1085 } 1086 return mallocScanTable[size](size, typ, needzero) 1087 } 1088 } 1089 1090 // It's possible for any malloc to trigger sweeping, which may in 1091 // turn queue finalizers. Record this dynamic lock edge. 1092 // N.B. Compiled away if lockrank experiment is not enabled. 1093 lockRankMayQueueFinalizer() 1094 1095 // Pre-malloc debug hooks. 1096 if debug.malloc { 1097 if x := preMallocgcDebug(size, typ); x != nil { 1098 return x 1099 } 1100 } 1101 1102 // For ASAN, we allocate extra memory around each allocation called the "redzone." 1103 // These "redzones" are marked as unaddressable. 1104 var asanRZ uintptr 1105 if asanenabled { 1106 asanRZ = redZoneSize(size) 1107 size += asanRZ 1108 } 1109 1110 // Assist the GC if needed. (On the reuse path, we currently compensate for this; 1111 // changes here might require changes there.) 1112 if gcBlackenEnabled != 0 { 1113 deductAssistCredit(size) 1114 } 1115 1116 // Actually do the allocation. 1117 var x unsafe.Pointer 1118 var elemsize uintptr 1119 if sizeSpecializedMallocEnabled { 1120 // we know that heapBitsInSpan is false. 1121 if size <= maxSmallSize-gc.MallocHeaderSize { 1122 if typ == nil || !typ.Pointers() { 1123 x, elemsize = mallocgcSmallNoscan(size, typ, needzero) 1124 } else { 1125 if !needzero { 1126 throw("objects with pointers must be zeroed") 1127 } 1128 x, elemsize = mallocgcSmallScanHeader(size, typ) 1129 } 1130 } else { 1131 x, elemsize = mallocgcLarge(size, typ, needzero) 1132 } 1133 } else { 1134 if size <= maxSmallSize-gc.MallocHeaderSize { 1135 if typ == nil || !typ.Pointers() { 1136 // tiny allocations might be kept alive by other co-located values. 1137 // Make sure secret allocations get zeroed by avoiding the tiny allocator 1138 // See go.dev/issue/76356 1139 gp := getg() 1140 if size < maxTinySize && gp.secret == 0 { 1141 x, elemsize = mallocgcTiny(size, typ) 1142 } else { 1143 x, elemsize = mallocgcSmallNoscan(size, typ, needzero) 1144 } 1145 } else { 1146 if !needzero { 1147 throw("objects with pointers must be zeroed") 1148 } 1149 if heapBitsInSpan(size) { 1150 x, elemsize = mallocgcSmallScanNoHeader(size, typ) 1151 } else { 1152 x, elemsize = mallocgcSmallScanHeader(size, typ) 1153 } 1154 } 1155 } else { 1156 x, elemsize = mallocgcLarge(size, typ, needzero) 1157 } 1158 } 1159 1160 gp := getg() 1161 if goexperiment.RuntimeSecret && gp.secret > 0 { 1162 // Mark any object allocated while in secret mode as secret. 1163 // This ensures we zero it immediately when freeing it. 
		addSecret(x, size)
	}

	// Notify sanitizers, if enabled.
	if raceenabled {
		racemalloc(x, size-asanRZ)
	}
	if msanenabled {
		msanmalloc(x, size-asanRZ)
	}
	if asanenabled {
		// Poison the space between the end of the requested size of x
		// and the end of the slot. Unpoison the requested allocation.
		frag := elemsize - size
		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-gc.MallocHeaderSize {
			frag -= gc.MallocHeaderSize
		}
		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
		asanunpoison(x, size-asanRZ)
	}
	if valgrindenabled {
		valgrindMalloc(x, size-asanRZ)
	}

	// Adjust our GC assist debt to account for internal fragmentation.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	// Post-malloc debug hooks.
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan for tiny alloc")
		}
	}
	mp.mallocing = 1

	// Tiny allocator.
	//
	// The tiny allocator combines several tiny allocation requests
	// into a single memory block. The resulting memory block
	// is freed when all subobjects are unreachable. The subobjects
	// must be noscan (have no pointers); this ensures that
	// the amount of potentially wasted memory is bounded.
	//
	// The size of the memory block used for combining (maxTinySize) is tunable.
	// The current setting is 16 bytes, which gives at most 2x worst-case memory
	// wastage (when all but one of the subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides fewer
	// opportunities for combining.
	// 32 bytes provides more opportunities for combining,
	// but can lead to 4x worst-case wastage.
	// The best-case saving is 8x regardless of block size.
	//
	// Objects obtained from the tiny allocator must not be freed explicitly.
	// So when an object will be freed explicitly, we ensure that
	// its size >= maxTinySize.
	//
	// SetFinalizer has a special case for objects potentially coming
	// from the tiny allocator; in that case it allows setting finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of the tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces the number of allocations by ~12% and
	// reduces the heap size by ~20%.
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if size&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && size == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that an object whose first field is a 64-bit
		// value is aligned to 8 bytes and does not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
1259 off = alignUp(off, 8) 1260 } else if size&3 == 0 { 1261 off = alignUp(off, 4) 1262 } else if size&1 == 0 { 1263 off = alignUp(off, 2) 1264 } 1265 if off+size <= maxTinySize && c.tiny != 0 { 1266 // The object fits into existing tiny block. 1267 x := unsafe.Pointer(c.tiny + off) 1268 c.tinyoffset = off + size 1269 c.tinyAllocs++ 1270 mp.mallocing = 0 1271 releasem(mp) 1272 return x, 0 1273 } 1274 // Allocate a new maxTinySize block. 1275 checkGCTrigger := false 1276 span := c.alloc[tinySpanClass] 1277 v := nextFreeFast(span) 1278 if v == 0 { 1279 v, span, checkGCTrigger = c.nextFree(tinySpanClass) 1280 } 1281 x := unsafe.Pointer(v) 1282 (*[2]uint64)(x)[0] = 0 // Always zero 1283 (*[2]uint64)(x)[1] = 0 1284 // See if we need to replace the existing tiny block with the new one 1285 // based on amount of remaining free space. 1286 if !raceenabled && (size < c.tinyoffset || c.tiny == 0) { 1287 // Note: disabled when race detector is on, see comment near end of this function. 1288 c.tiny = uintptr(x) 1289 c.tinyoffset = size 1290 } 1291 1292 // Ensure that the stores above that initialize x to 1293 // type-safe memory and set the heap bits occur before 1294 // the caller can make x observable to the garbage 1295 // collector. Otherwise, on weakly ordered machines, 1296 // the garbage collector could follow a pointer to x, 1297 // but see uninitialized memory or stale heap bits. 1298 publicationBarrier() 1299 1300 if writeBarrier.enabled { 1301 // Allocate black during GC. 1302 // All slots hold nil so no scanning is needed. 1303 // This may be racing with GC so do it atomically if there can be 1304 // a race marking the bit. 1305 gcmarknewobject(span, uintptr(x)) 1306 } else { 1307 // Track the last free index before the mark phase. This field 1308 // is only used by the garbage collector. During the mark phase 1309 // this is used by the conservative scanner to filter out objects 1310 // that are both free and recently-allocated. It's safe to do that 1311 // because we allocate-black if the GC is enabled. The conservative 1312 // scanner produces pointers out of thin air, so without additional 1313 // synchronization it might otherwise observe a partially-initialized 1314 // object, which could crash the program. 1315 span.freeIndexForScan = span.freeindex 1316 } 1317 1318 // Note cache c only valid while m acquired; see #47302 1319 // 1320 // N.B. Use the full size because that matches how the GC 1321 // will update the mem profile on the "free" side. 1322 // 1323 // TODO(mknyszek): We should really count the header as part 1324 // of gc_sys or something. The code below just pretends it is 1325 // internal fragmentation and matches the GC's accounting by 1326 // using the whole allocation slot. 1327 c.nextSample -= int64(span.elemsize) 1328 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1329 profilealloc(mp, x, span.elemsize) 1330 } 1331 mp.mallocing = 0 1332 releasem(mp) 1333 1334 if checkGCTrigger { 1335 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1336 gcStart(t) 1337 } 1338 } 1339 1340 if raceenabled { 1341 // Pad tinysize allocations so they are aligned with the end 1342 // of the tinyalloc region. This ensures that any arithmetic 1343 // that goes off the top end of the object will be detectable 1344 // by checkptr (issue 38872). 1345 // Note that we disable tinyalloc when raceenabled for this to work. 1346 // TODO: This padding is only performed when the race detector 1347 // is enabled. 
It would be nice to enable it if any package 1348 // was compiled with checkptr, but there's no easy way to 1349 // detect that (especially at compile time). 1350 // TODO: enable this padding for all allocations, not just 1351 // tinyalloc ones. It's tricky because of pointer maps. 1352 // Maybe just all noscan objects? 1353 x = add(x, span.elemsize-size) 1354 } 1355 return x, span.elemsize 1356 } 1357 1358 func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1359 // Set mp.mallocing to keep from being preempted by GC. 1360 mp := acquirem() 1361 if doubleCheckMalloc { 1362 if mp.mallocing != 0 { 1363 throw("malloc deadlock") 1364 } 1365 if mp.gsignal == getg() { 1366 throw("malloc during signal") 1367 } 1368 if typ != nil && typ.Pointers() { 1369 throw("expected noscan type for noscan alloc") 1370 } 1371 } 1372 mp.mallocing = 1 1373 1374 checkGCTrigger := false 1375 c := getMCache(mp) 1376 var sizeclass uint8 1377 if size <= gc.SmallSizeMax-8 { 1378 sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 1379 } else { 1380 sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 1381 } 1382 size = uintptr(gc.SizeClassToSize[sizeclass]) 1383 spc := makeSpanClass(sizeclass, true) 1384 span := c.alloc[spc] 1385 1386 // First, check for a reusable object. 1387 if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { 1388 // We have a reusable object, use it. 1389 x := mallocgcSmallNoscanReuse(c, span, spc, size, needzero) 1390 mp.mallocing = 0 1391 releasem(mp) 1392 return x, size 1393 } 1394 1395 v := nextFreeFast(span) 1396 if v == 0 { 1397 v, span, checkGCTrigger = c.nextFree(spc) 1398 } 1399 x := unsafe.Pointer(v) 1400 if needzero && span.needzero != 0 { 1401 memclrNoHeapPointers(x, size) 1402 } 1403 1404 // Ensure that the stores above that initialize x to 1405 // type-safe memory and set the heap bits occur before 1406 // the caller can make x observable to the garbage 1407 // collector. Otherwise, on weakly ordered machines, 1408 // the garbage collector could follow a pointer to x, 1409 // but see uninitialized memory or stale heap bits. 1410 publicationBarrier() 1411 1412 if writeBarrier.enabled { 1413 // Allocate black during GC. 1414 // All slots hold nil so no scanning is needed. 1415 // This may be racing with GC so do it atomically if there can be 1416 // a race marking the bit. 1417 gcmarknewobject(span, uintptr(x)) 1418 } else { 1419 // Track the last free index before the mark phase. This field 1420 // is only used by the garbage collector. During the mark phase 1421 // this is used by the conservative scanner to filter out objects 1422 // that are both free and recently-allocated. It's safe to do that 1423 // because we allocate-black if the GC is enabled. The conservative 1424 // scanner produces pointers out of thin air, so without additional 1425 // synchronization it might otherwise observe a partially-initialized 1426 // object, which could crash the program. 1427 span.freeIndexForScan = span.freeindex 1428 } 1429 1430 // Note cache c only valid while m acquired; see #47302 1431 // 1432 // N.B. Use the full size because that matches how the GC 1433 // will update the mem profile on the "free" side. 1434 // 1435 // TODO(mknyszek): We should really count the header as part 1436 // of gc_sys or something. The code below just pretends it is 1437 // internal fragmentation and matches the GC's accounting by 1438 // using the whole allocation slot. 
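	// (Aside, a rough sketch of the sampling bookkeeping below, with made-up
	// names: each mcache carries a byte budget (nextSample); every allocation
	// subtracts its size, and when the budget goes negative the allocation is
	// recorded in the heap profile and the budget is reset. The runtime draws
	// the reset from an exponential distribution with mean MemProfileRate so
	// that sampling is unbiased per allocated byte.)
	//
	//	func toyShouldSample(budget *int64, size, rate int64) bool {
	//		*budget -= size
	//		if *budget < 0 {
	//			*budget = rate // the runtime uses a random value with mean rate
	//			return true
	//		}
	//		return false
	//	}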
1439 c.nextSample -= int64(size) 1440 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1441 profilealloc(mp, x, size) 1442 } 1443 mp.mallocing = 0 1444 releasem(mp) 1445 1446 if checkGCTrigger { 1447 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1448 gcStart(t) 1449 } 1450 } 1451 return x, size 1452 } 1453 1454 // mallocgcSmallNoscanReuse returns a previously freed noscan object after preparing it for reuse. 1455 // It must only be called if hasReusableNoscan returned true. 1456 func mallocgcSmallNoscanReuse(c *mcache, span *mspan, spc spanClass, size uintptr, needzero bool) unsafe.Pointer { 1457 // TODO(thepudds): could nextFreeFast, nextFree and nextReusable return unsafe.Pointer? 1458 // Maybe doesn't matter. gclinkptr might be for historical reasons. 1459 v, span := c.nextReusableNoScan(span, spc) 1460 x := unsafe.Pointer(v) 1461 1462 // Compensate for the GC assist credit deducted in mallocgc (before calling us and 1463 // after we return) because this is not a newly allocated object. We use the full slot 1464 // size (elemsize) here because that's what mallocgc deducts overall. Note we only 1465 // adjust this when gcBlackenEnabled is true, which follows mallocgc behavior. 1466 // TODO(thepudds): a follow-up CL adds a more specific test of our assist credit 1467 // handling, including for validating internal fragmentation handling. 1468 if gcBlackenEnabled != 0 { 1469 addAssistCredit(size) 1470 } 1471 1472 // This is a previously used object, so only check needzero (and not span.needzero) 1473 // for clearing. 1474 if needzero { 1475 memclrNoHeapPointers(x, size) 1476 } 1477 1478 // See publicationBarrier comment in mallocgcSmallNoscan. 1479 publicationBarrier() 1480 1481 // Finish and return. Note that we do not update span.freeIndexForScan, profiling info, 1482 // nor do we check gcTrigger. 1483 // TODO(thepudds): the current approach is viable for a GOEXPERIMENT, but 1484 // means we do not profile reused heap objects. Ultimately, we will need a better 1485 // approach for profiling, or at least ensure we are not introducing bias in the 1486 // profiled allocations. 1487 // TODO(thepudds): related, we probably want to adjust how allocs and frees are counted 1488 // in the existing stats. Currently, reused objects are not counted as allocs nor 1489 // frees, but instead roughly appear as if the original heap object lived on. We 1490 // probably will also want some additional runtime/metrics, and generally think about 1491 // user-facing observability & diagnostics, though all this likely can wait for an 1492 // official proposal. 1493 if writeBarrier.enabled { 1494 // Allocate black during GC. 1495 // All slots hold nil so no scanning is needed. 1496 // This may be racing with GC so do it atomically if there can be 1497 // a race marking the bit. 1498 gcmarknewobject(span, uintptr(x)) 1499 } 1500 return x 1501 } 1502 1503 func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 1504 // Set mp.mallocing to keep from being preempted by GC. 
1505 mp := acquirem() 1506 if doubleCheckMalloc { 1507 if mp.mallocing != 0 { 1508 throw("malloc deadlock") 1509 } 1510 if mp.gsignal == getg() { 1511 throw("malloc during signal") 1512 } 1513 if typ == nil || !typ.Pointers() { 1514 throw("noscan allocated in scan-only path") 1515 } 1516 if !heapBitsInSpan(size) { 1517 throw("heap bits not in span for non-header-only path") 1518 } 1519 } 1520 mp.mallocing = 1 1521 1522 checkGCTrigger := false 1523 c := getMCache(mp) 1524 sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 1525 spc := makeSpanClass(sizeclass, false) 1526 span := c.alloc[spc] 1527 v := nextFreeFast(span) 1528 if v == 0 { 1529 v, span, checkGCTrigger = c.nextFree(spc) 1530 } 1531 x := unsafe.Pointer(v) 1532 if span.needzero != 0 { 1533 memclrNoHeapPointers(x, size) 1534 } 1535 if goarch.PtrSize == 8 && sizeclass == 1 { 1536 // initHeapBits already set the pointer bits for the 8-byte sizeclass 1537 // on 64-bit platforms. 1538 c.scanAlloc += 8 1539 } else { 1540 c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span) 1541 } 1542 size = uintptr(gc.SizeClassToSize[sizeclass]) 1543 1544 // Ensure that the stores above that initialize x to 1545 // type-safe memory and set the heap bits occur before 1546 // the caller can make x observable to the garbage 1547 // collector. Otherwise, on weakly ordered machines, 1548 // the garbage collector could follow a pointer to x, 1549 // but see uninitialized memory or stale heap bits. 1550 publicationBarrier() 1551 1552 if writeBarrier.enabled { 1553 // Allocate black during GC. 1554 // All slots hold nil so no scanning is needed. 1555 // This may be racing with GC so do it atomically if there can be 1556 // a race marking the bit. 1557 gcmarknewobject(span, uintptr(x)) 1558 } else { 1559 // Track the last free index before the mark phase. This field 1560 // is only used by the garbage collector. During the mark phase 1561 // this is used by the conservative scanner to filter out objects 1562 // that are both free and recently-allocated. It's safe to do that 1563 // because we allocate-black if the GC is enabled. The conservative 1564 // scanner produces pointers out of thin air, so without additional 1565 // synchronization it might otherwise observe a partially-initialized 1566 // object, which could crash the program. 1567 span.freeIndexForScan = span.freeindex 1568 } 1569 1570 // Note cache c only valid while m acquired; see #47302 1571 // 1572 // N.B. Use the full size because that matches how the GC 1573 // will update the mem profile on the "free" side. 1574 // 1575 // TODO(mknyszek): We should really count the header as part 1576 // of gc_sys or something. The code below just pretends it is 1577 // internal fragmentation and matches the GC's accounting by 1578 // using the whole allocation slot. 1579 c.nextSample -= int64(size) 1580 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1581 profilealloc(mp, x, size) 1582 } 1583 mp.mallocing = 0 1584 releasem(mp) 1585 1586 if checkGCTrigger { 1587 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1588 gcStart(t) 1589 } 1590 } 1591 return x, size 1592 } 1593 1594 func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 1595 // Set mp.mallocing to keep from being preempted by GC.
1596 mp := acquirem() 1597 if doubleCheckMalloc { 1598 if mp.mallocing != 0 { 1599 throw("malloc deadlock") 1600 } 1601 if mp.gsignal == getg() { 1602 throw("malloc during signal") 1603 } 1604 if typ == nil || !typ.Pointers() { 1605 throw("noscan allocated in scan-only path") 1606 } 1607 if heapBitsInSpan(size) { 1608 throw("heap bits in span for header-only path") 1609 } 1610 } 1611 mp.mallocing = 1 1612 1613 checkGCTrigger := false 1614 c := getMCache(mp) 1615 size += gc.MallocHeaderSize 1616 var sizeclass uint8 1617 if size <= gc.SmallSizeMax-8 { 1618 sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 1619 } else { 1620 sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 1621 } 1622 size = uintptr(gc.SizeClassToSize[sizeclass]) 1623 spc := makeSpanClass(sizeclass, false) 1624 span := c.alloc[spc] 1625 v := nextFreeFast(span) 1626 if v == 0 { 1627 v, span, checkGCTrigger = c.nextFree(spc) 1628 } 1629 x := unsafe.Pointer(v) 1630 if span.needzero != 0 { 1631 memclrNoHeapPointers(x, size) 1632 } 1633 header := (**_type)(x) 1634 x = add(x, gc.MallocHeaderSize) 1635 c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span) 1636 1637 // Ensure that the stores above that initialize x to 1638 // type-safe memory and set the heap bits occur before 1639 // the caller can make x observable to the garbage 1640 // collector. Otherwise, on weakly ordered machines, 1641 // the garbage collector could follow a pointer to x, 1642 // but see uninitialized memory or stale heap bits. 1643 publicationBarrier() 1644 1645 if writeBarrier.enabled { 1646 // Allocate black during GC. 1647 // All slots hold nil so no scanning is needed. 1648 // This may be racing with GC so do it atomically if there can be 1649 // a race marking the bit. 1650 gcmarknewobject(span, uintptr(x)) 1651 } else { 1652 // Track the last free index before the mark phase. This field 1653 // is only used by the garbage collector. During the mark phase 1654 // this is used by the conservative scanner to filter out objects 1655 // that are both free and recently-allocated. It's safe to do that 1656 // because we allocate-black if the GC is enabled. The conservative 1657 // scanner produces pointers out of thin air, so without additional 1658 // synchronization it might otherwise observe a partially-initialized 1659 // object, which could crash the program. 1660 span.freeIndexForScan = span.freeindex 1661 } 1662 1663 // Note cache c only valid while m acquired; see #47302 1664 // 1665 // N.B. Use the full size because that matches how the GC 1666 // will update the mem profile on the "free" side. 1667 // 1668 // TODO(mknyszek): We should really count the header as part 1669 // of gc_sys or something. The code below just pretends it is 1670 // internal fragmentation and matches the GC's accounting by 1671 // using the whole allocation slot. 1672 c.nextSample -= int64(size) 1673 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1674 profilealloc(mp, x, size) 1675 } 1676 mp.mallocing = 0 1677 releasem(mp) 1678 1679 if checkGCTrigger { 1680 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1681 gcStart(t) 1682 } 1683 } 1684 return x, size 1685 } 1686 1687 func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1688 // Set mp.mallocing to keep from being preempted by GC. 
1689 mp := acquirem() 1690 if doubleCheckMalloc { 1691 if mp.mallocing != 0 { 1692 throw("malloc deadlock") 1693 } 1694 if mp.gsignal == getg() { 1695 throw("malloc during signal") 1696 } 1697 } 1698 mp.mallocing = 1 1699 1700 c := getMCache(mp) 1701 // For large allocations, keep track of zeroed state so that 1702 // bulk zeroing can happen later in a preemptible context. 1703 span := c.allocLarge(size, typ == nil || !typ.Pointers()) 1704 span.freeindex = 1 1705 span.allocCount = 1 1706 span.largeType = nil // Tell the GC not to look at this yet. 1707 size = span.elemsize 1708 x := unsafe.Pointer(span.base()) 1709 1710 // Ensure that the store above that sets largeType to 1711 // nil happens before the caller can make x observable 1712 // to the garbage collector. 1713 // 1714 // Otherwise, on weakly ordered machines, the garbage 1715 // collector could follow a pointer to x, but see a stale 1716 // largeType value. 1717 publicationBarrier() 1718 1719 if writeBarrier.enabled { 1720 // Allocate black during GC. 1721 // All slots hold nil so no scanning is needed. 1722 // This may be racing with GC so do it atomically if there can be 1723 // a race marking the bit. 1724 gcmarknewobject(span, uintptr(x)) 1725 } else { 1726 // Track the last free index before the mark phase. This field 1727 // is only used by the garbage collector. During the mark phase 1728 // this is used by the conservative scanner to filter out objects 1729 // that are both free and recently-allocated. It's safe to do that 1730 // because we allocate-black if the GC is enabled. The conservative 1731 // scanner produces pointers out of thin air, so without additional 1732 // synchronization it might otherwise observe a partially-initialized 1733 // object, which could crash the program. 1734 span.freeIndexForScan = span.freeindex 1735 } 1736 1737 // Note cache c only valid while m acquired; see #47302 1738 // 1739 // N.B. Use the full size because that matches how the GC 1740 // will update the mem profile on the "free" side. 1741 // 1742 // TODO(mknyszek): We should really count the header as part 1743 // of gc_sys or something. The code below just pretends it is 1744 // internal fragmentation and matches the GC's accounting by 1745 // using the whole allocation slot. 1746 c.nextSample -= int64(size) 1747 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1748 profilealloc(mp, x, size) 1749 } 1750 mp.mallocing = 0 1751 releasem(mp) 1752 1753 // Check to see if we need to trigger the GC. 1754 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1755 gcStart(t) 1756 } 1757 1758 // Objects can be zeroed late in a context where preemption can occur. 1759 // 1760 // x will keep the memory alive. 1761 if needzero && span.needzero != 0 { 1762 // N.B. size == fullSize always in this case. 1763 memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302 1764 } 1765 1766 // Set the type and run the publication barrier while non-preemptible. We need to make 1767 // sure that between heapSetTypeLarge and publicationBarrier we cannot get preempted, 1768 // otherwise the GC could potentially observe non-zeroed memory but largeType set on weak 1769 // memory architectures. 1770 // 1771 // The GC can also potentially observe non-zeroed memory if conservative scanning spuriously 1772 // observes a partially-allocated object, see the freeIndexForScan update above. This case is 1773 // handled by synchronization inside heapSetTypeLarge.
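// Re-acquire the M so we stay non-preemptible while setting the type and
// running the publication barrier below.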
1774 mp = acquirem() 1775 if typ != nil && typ.Pointers() { 1776 // Finish storing the type information, now that we're certain the memory is zeroed. 1777 getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span) 1778 } 1779 // Publish the object again, now with zeroed memory and initialized type information. 1780 // 1781 // Even if we didn't update any type information, this is necessary to ensure that, for example, 1782 // x written to a global without any synchronization still results in other goroutines observing 1783 // zeroed memory. 1784 publicationBarrier() 1785 releasem(mp) 1786 return x, size 1787 } 1788 1789 func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer { 1790 if debug.sbrk != 0 { 1791 align := uintptr(16) 1792 if typ != nil { 1793 // TODO(austin): This should be just 1794 // align = uintptr(typ.align) 1795 // but that's only 4 on 32-bit platforms, 1796 // even if there's a uint64 field in typ (see #599). 1797 // This causes 64-bit atomic accesses to panic. 1798 // Hence, we use stricter alignment that matches 1799 // the normal allocator better. 1800 if size&7 == 0 { 1801 align = 8 1802 } else if size&3 == 0 { 1803 align = 4 1804 } else if size&1 == 0 { 1805 align = 2 1806 } else { 1807 align = 1 1808 } 1809 } 1810 return persistentalloc(size, align, &memstats.other_sys) 1811 } 1812 if inittrace.active && inittrace.id == getg().goid { 1813 // Init functions are executed sequentially in a single goroutine. 1814 inittrace.allocs += 1 1815 } 1816 return nil 1817 } 1818 1819 func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) { 1820 if inittrace.active && inittrace.id == getg().goid { 1821 // Init functions are executed sequentially in a single goroutine. 1822 inittrace.bytes += uint64(elemsize) 1823 } 1824 1825 if traceAllocFreeEnabled() { 1826 trace := traceAcquire() 1827 if trace.ok() { 1828 trace.HeapObjectAlloc(uintptr(x), typ) 1829 traceRelease(trace) 1830 } 1831 } 1832 1833 // N.B. elemsize == 0 indicates a tiny allocation, since no new slot was 1834 // allocated to fulfill this call to mallocgc. This means checkfinalizer 1835 // will only flag an error if there is actually any risk. If an allocation 1836 // has the tiny block to itself, it will not get flagged, because we won't 1837 // mark the block as a tiny block. 1838 if debug.checkfinalizers != 0 && elemsize == 0 { 1839 setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize))) 1840 } 1841 } 1842 1843 // deductAssistCredit reduces the current G's assist credit 1844 // by size bytes, and assists the GC if necessary. 1845 // 1846 // Caller must be preemptible. 1847 func deductAssistCredit(size uintptr) { 1848 // Charge the current user G for this allocation. 1849 assistG := getg() 1850 if assistG.m.curg != nil { 1851 assistG = assistG.m.curg 1852 } 1853 // Charge the allocation against the G. We'll account 1854 // for internal fragmentation at the end of mallocgc. 1855 assistG.gcAssistBytes -= int64(size) 1856 1857 if assistG.gcAssistBytes < 0 { 1858 // This G is in debt. Assist the GC to correct 1859 // this before allocating. This must happen 1860 // before disabling preemption. 1861 gcAssistAlloc(assistG) 1862 } 1863 } 1864 1865 // addAssistCredit is like deductAssistCredit, 1866 // but adds credit rather than removes, 1867 // and never calls gcAssistAlloc. 1868 func addAssistCredit(size uintptr) { 1869 // Credit the current user G. 1870 assistG := getg() 1871 if assistG.m.curg != nil { // TODO(thepudds): do we need to do this? 
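// As in deductAssistCredit above, make sure the credit goes to the current
// user G, even if we are running on a system g.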
1872 assistG = assistG.m.curg 1873 } 1874 // Credit the size against the G. 1875 assistG.gcAssistBytes += int64(size) 1876 } 1877 1878 const ( 1879 // doubleCheckReusable enables some additional invariant checks for the 1880 // runtime.freegc and reusable objects. Note that some of these checks alter timing, 1881 // and it is good to test changes with and without this enabled. 1882 doubleCheckReusable = false 1883 1884 // debugReusableLog enables some printlns for runtime.freegc and reusable objects. 1885 debugReusableLog = false 1886 ) 1887 1888 // freegc records that a heap object is reusable and available for 1889 // immediate reuse in a subsequent mallocgc allocation, without 1890 // needing to wait for the GC cycle to progress. 1891 // 1892 // The information is recorded in a free list stored in the 1893 // current P's mcache. The caller must pass in the user size 1894 // and whether the object has pointers, which allows a faster free 1895 // operation. 1896 // 1897 // freegc must be called by the effective owner of ptr who knows 1898 // the pointer is logically dead, with no possible aliases that might 1899 // be used past that moment. In other words, ptr must be the 1900 // last and only pointer to its referent. 1901 // 1902 // The intended caller is the compiler. 1903 // 1904 // Note: please do not send changes that attempt to add freegc calls 1905 // to the standard library. 1906 // 1907 // ptr must point to a heap object or into the current g's stack, 1908 // in which case freegc is a no-op. In particular, ptr must not point 1909 // to memory in the data or bss sections, which is partially enforced. 1910 // For objects with a malloc header, ptr should point mallocHeaderSize bytes 1911 // past the base; otherwise, ptr should point to the base of the heap object. 1912 // In other words, ptr should be the same pointer that was returned by mallocgc. 1913 // 1914 // In addition, the caller must know that ptr's object has no specials, such 1915 // as might have been created by a call to SetFinalizer or AddCleanup. 1916 // (Internally, the runtime deals appropriately with internally-created 1917 // specials, such as specials for memory profiling). 1918 // 1919 // If the size of ptr's object is less than 16 bytes or greater than 1920 // 32KiB - gc.MallocHeaderSize bytes, freegc is currently a no-op. It must only 1921 // be called in alloc-safe places. It currently throws if noscan is false 1922 // (support for which is implemented in a later CL in our stack). 1923 // 1924 // Note that freegc accepts an unsafe.Pointer and hence keeps the pointer 1925 // alive. It therefore could be a pessimization in some cases (such 1926 // as a long-lived function) if the caller does not call freegc before 1927 // or roughly when the liveness analysis of the compiler 1928 // would otherwise have determined ptr's object is reclaimable by the GC. 1929 func freegc(ptr unsafe.Pointer, size uintptr, noscan bool) bool { 1930 if !runtimeFreegcEnabled || !reusableSize(size) { 1931 return false 1932 } 1933 if sizeSpecializedMallocEnabled && !noscan { 1934 // TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc for pointer types 1935 // until we finish integrating. 1936 return false 1937 } 1938 1939 if ptr == nil { 1940 throw("freegc nil") 1941 } 1942 1943 // Set mp.mallocing to keep from being preempted by GC. 1944 // Otherwise, the GC could flush our mcache or otherwise cause problems. 
1945 mp := acquirem() 1946 if mp.mallocing != 0 { 1947 throw("freegc deadlock") 1948 } 1949 if mp.gsignal == getg() { 1950 throw("freegc during signal") 1951 } 1952 mp.mallocing = 1 1953 1954 if mp.curg.stack.lo <= uintptr(ptr) && uintptr(ptr) < mp.curg.stack.hi { 1955 // This points into our stack, so free is a no-op. 1956 mp.mallocing = 0 1957 releasem(mp) 1958 return false 1959 } 1960 1961 if doubleCheckReusable { 1962 // TODO(thepudds): we could enforce no free on globals in bss or data. Maybe by 1963 // checking span via spanOf or spanOfHeap, or maybe walk from firstmoduledata 1964 // like isGoPointerWithoutSpan, or activeModules, or something. If so, we might 1965 // be able to delay checking until reuse (e.g., check span just before reusing, 1966 // though currently we don't always need to lookup a span on reuse). If we think 1967 // no usage patterns could result in globals, maybe enforcement for globals could 1968 // be behind -d=checkptr=1 or similar. The compiler can have knowledge of where 1969 // a variable is allocated, but stdlib does not, although there are certain 1970 // usage patterns that cannot result in a global. 1971 // TODO(thepudds): separately, consider a local debugReusableMcacheOnly here 1972 // to ignore freed objects if not in mspan in mcache, maybe when freeing and reading, 1973 // by checking something like s.base() <= uintptr(v) && uintptr(v) < s.limit. Or 1974 // maybe a GODEBUG or compiler debug flag. 1975 span := spanOf(uintptr(ptr)) 1976 if span == nil { 1977 throw("freegc: nil span for pointer in free list") 1978 } 1979 if state := span.state.get(); state != mSpanInUse { 1980 throw("freegc: span is not in use") 1981 } 1982 } 1983 1984 if debug.clobberfree != 0 { 1985 clobberfree(ptr, size) 1986 } 1987 1988 // We first check if ptr is still in our per-P cache. 1989 // Get our per-P cache for small objects. 1990 c := getMCache(mp) 1991 if c == nil { 1992 throw("freegc called without a P or outside bootstrapping") 1993 } 1994 1995 v := uintptr(ptr) 1996 if !noscan && !heapBitsInSpan(size) { 1997 // mallocgcSmallScanHeader expects to get the base address of the object back 1998 // from the findReusable funcs (as well as from nextFreeFast and nextFree), and 1999 // not mallocHeaderSize bytes into an object, so adjust that here. 2000 v -= mallocHeaderSize 2001 2002 // The size class lookup wants size to be adjusted by mallocHeaderSize. 2003 size += mallocHeaderSize 2004 } 2005 2006 // TODO(thepudds): should verify (behind doubleCheckReusable constant) that our calculated 2007 // sizeclass here matches what's in span found via spanOf(ptr) or findObject(ptr). 2008 var sizeclass uint8 2009 if size <= gc.SmallSizeMax-8 { 2010 sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 2011 } else { 2012 sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 2013 } 2014 2015 spc := makeSpanClass(sizeclass, noscan) 2016 s := c.alloc[spc] 2017 2018 if debugReusableLog { 2019 if s.base() <= uintptr(v) && uintptr(v) < s.limit { 2020 println("freegc [in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2021 } else { 2022 println("freegc [NOT in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2023 } 2024 } 2025 2026 if noscan { 2027 c.addReusableNoscan(spc, uintptr(v)) 2028 } else { 2029 // TODO(thepudds): implemented in later CL in our stack.
2030 throw("freegc called for object with pointers, not yet implemented") 2031 } 2032 2033 // For stats, for now we leave allocCount alone, roughly pretending to the rest 2034 // of the system that this potential reuse never happened. 2035 2036 mp.mallocing = 0 2037 releasem(mp) 2038 2039 return true 2040 } 2041 2042 // nextReusableNoScan returns the next reusable object for a noscan span, 2043 // or 0 if no reusable object is found. 2044 func (c *mcache) nextReusableNoScan(s *mspan, spc spanClass) (gclinkptr, *mspan) { 2045 if !runtimeFreegcEnabled { 2046 return 0, s 2047 } 2048 2049 // Pop a reusable pointer from the free list for this span class. 2050 v := c.reusableNoscan[spc] 2051 if v == 0 { 2052 return 0, s 2053 } 2054 c.reusableNoscan[spc] = v.ptr().next 2055 2056 if debugReusableLog { 2057 println("reusing from ptr free list:", hex(v), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2058 } 2059 if doubleCheckReusable { 2060 doubleCheckNextReusable(v) // debug only sanity check 2061 } 2062 2063 // For noscan spans, we only need the span if the write barrier is enabled (so that our caller 2064 // can call gcmarknewobject to allocate black). If the write barrier is enabled, we can skip 2065 // looking up the span when the pointer is in a span in the mcache. 2066 if !writeBarrier.enabled { 2067 return v, nil 2068 } 2069 if s.base() <= uintptr(v) && uintptr(v) < s.limit { 2070 // Return the original span. 2071 return v, s 2072 } 2073 2074 // We must find and return the span. 2075 span := spanOf(uintptr(v)) 2076 if span == nil { 2077 // TODO(thepudds): construct a test that triggers this throw. 2078 throw("nextReusableNoScan: nil span for pointer in reusable object free list") 2079 } 2080 2081 return v, span 2082 } 2083 2084 // doubleCheckNextReusable checks some invariants. 2085 // TODO(thepudds): will probably delete some of this. Can mostly be ignored for review. 2086 func doubleCheckNextReusable(v gclinkptr) { 2087 // TODO(thepudds): should probably take the spanClass as well to confirm expected 2088 // sizeclass match. 2089 _, span, objIndex := findObject(uintptr(v), 0, 0) 2090 if span == nil { 2091 throw("nextReusable: nil span for pointer in free list") 2092 } 2093 if state := span.state.get(); state != mSpanInUse { 2094 throw("nextReusable: span is not in use") 2095 } 2096 if uintptr(v) < span.base() || uintptr(v) >= span.limit { 2097 throw("nextReusable: span is not in range") 2098 } 2099 if span.objBase(uintptr(v)) != uintptr(v) { 2100 print("nextReusable: v=", hex(v), " base=", hex(span.objBase(uintptr(v))), "\n") 2101 throw("nextReusable: v is non-base-address for object found on pointer free list") 2102 } 2103 if span.isFree(objIndex) { 2104 throw("nextReusable: pointer on free list is free") 2105 } 2106 2107 const debugReusableEnsureSwept = false 2108 if debugReusableEnsureSwept { 2109 // Currently disabled. 2110 // Note: ensureSwept here alters behavior (not just an invariant check). 2111 span.ensureSwept() 2112 if span.isFree(objIndex) { 2113 throw("nextReusable: pointer on free list is free after ensureSwept") 2114 } 2115 } 2116 } 2117 2118 // reusableSize reports if size is a currently supported size for a reusable object. 
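// Currently that means at least maxTinySize bytes and at most
// maxSmallSize-mallocHeaderSize bytes, matching the size limits described in
// the freegc doc comment.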
2119 func reusableSize(size uintptr) bool { 2120 if size < maxTinySize || size > maxSmallSize-mallocHeaderSize { 2121 return false 2122 } 2123 return true 2124 } 2125 2126 // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers 2127 // on chunks of the buffer to be zeroed, with opportunities for preemption 2128 // along the way. memclrNoHeapPointers contains no safepoints and also 2129 // cannot be preemptively scheduled, so this provides a still-efficient 2130 // bulk clear that can also be preempted at a reasonable granularity. 2131 // 2132 // Use this with care; if the data being cleared is tagged to contain 2133 // pointers, this allows the GC to run before it is all cleared. 2134 func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) { 2135 v := uintptr(x) 2136 // Got this from benchmarking. 128k is too small, 512k is too large. 2137 const chunkBytes = 256 * 1024 2138 vsize := v + size 2139 for voff := v; voff < vsize; voff = voff + chunkBytes { 2140 if getg().preempt { 2141 // may hold locks, e.g., profiling 2142 goschedguarded() 2143 } 2144 // Clear min(avail, chunkBytes) bytes. 2145 n := vsize - voff 2146 if n > chunkBytes { 2147 n = chunkBytes 2148 } 2149 memclrNoHeapPointers(unsafe.Pointer(voff), n) 2150 } 2151 } 2152 2153 // newobject is the implementation of the new builtin. 2154 // The compiler (both frontend and SSA backend) knows the signature 2155 // of this function. 2156 func newobject(typ *_type) unsafe.Pointer { 2157 return mallocgc(typ.Size_, typ, true) 2158 } 2159 2160 //go:linkname maps_newobject internal/runtime/maps.newobject 2161 func maps_newobject(typ *_type) unsafe.Pointer { 2162 return newobject(typ) 2163 } 2164 2165 // reflect_unsafe_New is meant for package reflect, 2166 // but widely used packages access it using linkname. 2167 // Notable members of the hall of shame include: 2168 // - gitee.com/quant1x/gox 2169 // - github.com/goccy/json 2170 // - github.com/modern-go/reflect2 2171 // - github.com/v2pro/plz 2172 // 2173 // Do not remove or change the type signature. 2174 // See go.dev/issue/67401. 2175 // 2176 //go:linkname reflect_unsafe_New reflect.unsafe_New 2177 func reflect_unsafe_New(typ *_type) unsafe.Pointer { 2178 return mallocgc(typ.Size_, typ, true) 2179 } 2180 2181 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New 2182 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer { 2183 return mallocgc(typ.Size_, typ, true) 2184 } 2185 2186 // newarray allocates an array of n elements of type typ. 2187 // 2188 // newarray should be an internal detail, 2189 // but widely used packages access it using linkname. 2190 // Notable members of the hall of shame include: 2191 // - github.com/RomiChan/protobuf 2192 // - github.com/segmentio/encoding 2193 // - github.com/ugorji/go/codec 2194 // 2195 // Do not remove or change the type signature. 2196 // See go.dev/issue/67401. 2197 // 2198 //go:linkname newarray 2199 func newarray(typ *_type, n int) unsafe.Pointer { 2200 if n == 1 { 2201 return mallocgc(typ.Size_, typ, true) 2202 } 2203 mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) 2204 if overflow || mem > maxAlloc || n < 0 { 2205 panic(plainError("runtime: allocation size out of range")) 2206 } 2207 return mallocgc(mem, typ, true) 2208 } 2209 2210 // reflect_unsafe_NewArray is meant for package reflect, 2211 // but widely used packages access it using linkname.
2212 // Notable members of the hall of shame include: 2213 // - gitee.com/quant1x/gox 2214 // - github.com/bytedance/sonic 2215 // - github.com/goccy/json 2216 // - github.com/modern-go/reflect2 2217 // - github.com/segmentio/encoding 2218 // - github.com/segmentio/kafka-go 2219 // - github.com/v2pro/plz 2220 // 2221 // Do not remove or change the type signature. 2222 // See go.dev/issue/67401. 2223 // 2224 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray 2225 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 2226 return newarray(typ, n) 2227 } 2228 2229 //go:linkname maps_newarray internal/runtime/maps.newarray 2230 func maps_newarray(typ *_type, n int) unsafe.Pointer { 2231 return newarray(typ, n) 2232 } 2233 2234 // profilealloc resets the current mcache's nextSample counter and 2235 // records a memory profile sample. 2236 // 2237 // The caller must be non-preemptible and have a P. 2238 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 2239 c := getMCache(mp) 2240 if c == nil { 2241 throw("profilealloc called without a P or outside bootstrapping") 2242 } 2243 c.memProfRate = MemProfileRate 2244 c.nextSample = nextSample() 2245 mProf_Malloc(mp, x, size) 2246 } 2247 2248 // nextSample returns the next sampling point for heap profiling. The goal is 2249 // to sample allocations on average every MemProfileRate bytes, but with a 2250 // completely random distribution over the allocation timeline; this 2251 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson 2252 // processes, the distance between two samples is exponentially distributed, 2253 // so the best return value is a random number taken from an exponential 2254 // distribution whose mean is MemProfileRate. 2255 func nextSample() int64 { 2256 if MemProfileRate == 0 { 2257 // Basically never sample. 2258 return math.MaxInt64 2259 } 2260 if MemProfileRate == 1 { 2261 // Sample immediately. 2262 return 0 2263 } 2264 return int64(fastexprand(MemProfileRate)) 2265 } 2266 2267 // fastexprand returns a random number from an exponential distribution with 2268 // the specified mean. 2269 func fastexprand(mean int) int32 { 2270 // Avoid overflow. Maximum possible step is 2271 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean. 2272 switch { 2273 case mean > 0x7000000: 2274 mean = 0x7000000 2275 case mean == 0: 2276 return 0 2277 } 2278 2279 // Take a random sample of the exponential distribution exp(-x/mean). 2280 // The probability density function is (1/mean)*exp(-x/mean), so the CDF is 2281 // p = 1 - exp(-x/mean), so 2282 // q = 1 - p == exp(-x/mean) 2283 // log_e(q) = -x/mean 2284 // -log_e(q) = x/mean 2285 // x = -log_e(q) * mean 2286 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency 2287 const randomBitCount = 26 2288 q := cheaprandn(1<<randomBitCount) + 1 2289 qlog := fastlog2(float64(q)) - randomBitCount 2290 if qlog > 0 { 2291 qlog = 0 2292 } 2293 const minusLog2 = -0.6931471805599453 // -ln(2) 2294 return int32(qlog*(minusLog2*float64(mean))) + 1 2295 } 2296 2297 type persistentAlloc struct { 2298 base *notInHeap 2299 off uintptr 2300 } 2301 2302 var globalAlloc struct { 2303 mutex 2304 persistentAlloc 2305 } 2306 2307 // persistentChunkSize is the number of bytes we allocate when we grow 2308 // a persistentAlloc. 2309 const persistentChunkSize = 256 << 10 2310 2311 // persistentChunks is a list of all the persistent chunks we have 2312 // allocated.
The list is maintained through the first word in the 2313 // persistent chunk. This is updated atomically. 2314 var persistentChunks *notInHeap 2315 2316 // Wrapper around sysAlloc that can allocate small chunks. 2317 // There is no associated free operation. 2318 // Intended for things like function/type/debug-related persistent data. 2319 // If align is 0, uses default align (currently 8). 2320 // The returned memory will be zeroed. 2321 // sysStat must be non-nil. 2322 // 2323 // Consider marking persistentalloc'd types not in heap by embedding 2324 // internal/runtime/sys.NotInHeap. 2325 // 2326 // nosplit because it is used during write barriers and must not be preempted. 2327 // 2328 //go:nosplit 2329 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { 2330 var p *notInHeap 2331 systemstack(func() { 2332 p = persistentalloc1(size, align, sysStat) 2333 }) 2334 return unsafe.Pointer(p) 2335 } 2336 2337 // Must run on system stack because stack growth can (re)invoke it. 2338 // See issue 9174. 2339 // 2340 //go:systemstack 2341 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap { 2342 const ( 2343 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows 2344 ) 2345 2346 if size == 0 { 2347 throw("persistentalloc: size == 0") 2348 } 2349 if align != 0 { 2350 if align&(align-1) != 0 { 2351 throw("persistentalloc: align is not a power of 2") 2352 } 2353 if align > pageSize { 2354 throw("persistentalloc: align is too large") 2355 } 2356 } else { 2357 align = 8 2358 } 2359 2360 if size >= maxBlock { 2361 return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata")) 2362 } 2363 2364 mp := acquirem() 2365 var persistent *persistentAlloc 2366 if mp != nil && mp.p != 0 { 2367 persistent = &mp.p.ptr().palloc 2368 } else { 2369 lock(&globalAlloc.mutex) 2370 persistent = &globalAlloc.persistentAlloc 2371 } 2372 persistent.off = alignUp(persistent.off, align) 2373 if persistent.off+size > persistentChunkSize || persistent.base == nil { 2374 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata")) 2375 if persistent.base == nil { 2376 if persistent == &globalAlloc.persistentAlloc { 2377 unlock(&globalAlloc.mutex) 2378 } 2379 throw("runtime: cannot allocate memory") 2380 } 2381 2382 // Add the new chunk to the persistentChunks list. 2383 for { 2384 chunks := uintptr(unsafe.Pointer(persistentChunks)) 2385 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks 2386 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { 2387 break 2388 } 2389 } 2390 persistent.off = alignUp(goarch.PtrSize, align) 2391 } 2392 p := persistent.base.add(persistent.off) 2393 persistent.off += size 2394 releasem(mp) 2395 if persistent == &globalAlloc.persistentAlloc { 2396 unlock(&globalAlloc.mutex) 2397 } 2398 2399 if sysStat != &memstats.other_sys { 2400 sysStat.add(int64(size)) 2401 memstats.other_sys.add(-int64(size)) 2402 } 2403 return p 2404 } 2405 2406 // inPersistentAlloc reports whether p points to memory allocated by 2407 // persistentalloc. This must be nosplit because it is called by the 2408 // cgo checker code, which is called by the write barrier code. 
2409 // 2410 //go:nosplit 2411 func inPersistentAlloc(p uintptr) bool { 2412 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) 2413 for chunk != 0 { 2414 if p >= chunk && p < chunk+persistentChunkSize { 2415 return true 2416 } 2417 chunk = *(*uintptr)(unsafe.Pointer(chunk)) 2418 } 2419 return false 2420 } 2421 2422 // linearAlloc is a simple linear allocator that pre-reserves a region 2423 // of memory and then optionally maps that region into the Ready state 2424 // as needed. 2425 // 2426 // The caller is responsible for locking. 2427 type linearAlloc struct { 2428 next uintptr // next free byte 2429 mapped uintptr // one byte past end of mapped space 2430 end uintptr // end of reserved space 2431 2432 mapMemory bool // transition memory from Reserved to Ready if true 2433 } 2434 2435 func (l *linearAlloc) init(base, size uintptr, mapMemory bool) { 2436 if base+size < base { 2437 // Chop off the last byte. The runtime isn't prepared 2438 // to deal with situations where the bounds could overflow. 2439 // Leave that memory reserved, though, so we don't map it 2440 // later. 2441 size -= 1 2442 } 2443 l.next, l.mapped = base, base 2444 l.end = base + size 2445 l.mapMemory = mapMemory 2446 } 2447 2448 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer { 2449 p := alignUp(l.next, align) 2450 if p+size > l.end { 2451 return nil 2452 } 2453 l.next = p + size 2454 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped { 2455 if l.mapMemory { 2456 // Transition from Reserved to Prepared to Ready. 2457 n := pEnd - l.mapped 2458 sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName) 2459 sysUsed(unsafe.Pointer(l.mapped), n, n) 2460 } 2461 l.mapped = pEnd 2462 } 2463 return unsafe.Pointer(p) 2464 } 2465 2466 // notInHeap is off-heap memory allocated by a lower-level allocator 2467 // like sysAlloc or persistentAlloc. 2468 // 2469 // In general, it's better to use real types which embed 2470 // internal/runtime/sys.NotInHeap, but this serves as a generic type 2471 // for situations where that isn't possible (like in the allocators). 2472 // 2473 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? 2474 type notInHeap struct{ _ sys.NotInHeap } 2475 2476 func (p *notInHeap) add(bytes uintptr) *notInHeap { 2477 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) 2478 } 2479 2480 // redZoneSize computes the size of the redzone for a given allocation. 2481 // Refer to the implementation of the compiler-rt. 2482 func redZoneSize(userSize uintptr) uintptr { 2483 switch { 2484 case userSize <= (64 - 16): 2485 return 16 << 0 2486 case userSize <= (128 - 32): 2487 return 16 << 1 2488 case userSize <= (512 - 64): 2489 return 16 << 2 2490 case userSize <= (4096 - 128): 2491 return 16 << 3 2492 case userSize <= (1<<14)-256: 2493 return 16 << 4 2494 case userSize <= (1<<15)-512: 2495 return 16 << 5 2496 case userSize <= (1<<16)-1024: 2497 return 16 << 6 2498 default: 2499 return 16 << 7 2500 } 2501 } 2502
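// Illustrative example (not part of the allocator logic): a userSize of 100 bytes
// is greater than 64-16 and 128-32 but at most 512-64, so redZoneSize returns
// 16<<2 = 64 bytes of red zone for it.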