Source file src/runtime/preempt_xreg.go

     1  // Copyright 2025 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build amd64 || arm64 || loong64
     6  
     7  // This provides common support for architectures that use extended register
     8  // state in asynchronous preemption.
     9  //
    10  // While asynchronous preemption stores general-purpose (GP) registers on the
    11  // preempted goroutine's own stack, extended register state can be used to save
    12  // non-GP state off the stack. In particular, this is meant for large vector
    13  // register files. This memory is conservatively scanned to enable using
    14  // non-GP registers for operations that may involve pointers.
    15  //
    16  // For an architecture to support extended register state, it must provide a Go
    17  // definition of an xRegState type for storing the state, and its asyncPreempt
    18  // implementation must write this register state to p.xRegs.scratch.
    19  
    20  package runtime
    21  
    22  import (
    23  	"internal/abi"
    24  	"internal/runtime/sys"
    25  	"unsafe"
    26  )
    27  
// xRegState is long-lived extended register state. It is allocated off-heap and
// manually managed via xRegAlloc, so the GC never moves or frees it; while a
// block is in use, its contents are conservatively scanned by xRegScan instead.
type xRegState struct {
	_    sys.NotInHeap // Allocated from xRegAlloc, never from the GC'd heap
	regs xRegs
}
    34  
// xRegPerG stores extended register state while a goroutine is asynchronously
// preempted. This is nil otherwise, so we can reuse a (likely small) pool of
// xRegState objects.
type xRegPerG struct {
	// state is non-nil only between xRegSave (on async preemption) and
	// xRegRestore (on return from preemption).
	state *xRegState
}
    41  
// xRegPerP is the per-P state used by asynchronous preemption to stage and
// cache extended register state.
type xRegPerP struct {
	// scratch temporary per-P space where [asyncPreempt] saves the register
	// state before entering Go. It's quickly copied to per-G state
	// (see xRegSave).
	scratch xRegs

	// cache is a 1-element allocation cache of extended register state used by
	// asynchronous preemption. On entry to preemption, this is used as a simple
	// allocation cache. On exit from preemption, the G's xRegState is always
	// stored here where it can be restored, and later either freed or reused
	// for another preemption. On exit, this serves the dual purpose of
	// delay-freeing the allocated xRegState until after we've definitely
	// restored it.
	cache *xRegState
}
    56  
// xRegAlloc allocates xRegState objects from off-heap memory.
//
// The fixed-size allocator is guarded by lock; all alloc/free calls must hold
// it (see xRegSave and xRegPerP.free).
var xRegAlloc struct {
	lock  mutex
	alloc fixalloc
}
    62  
// xRegInitAlloc initializes the xRegState allocator. It must run before the
// first xRegSave — presumably during runtime initialization (TODO confirm
// against caller).
func xRegInitAlloc() {
	lockInit(&xRegAlloc.lock, lockRankXRegAlloc)
	// Fixed-size blocks of exactly one xRegState; accounted as other_sys.
	xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys)
}
    67  
// xRegSave saves the extended register state on this P to gp.
//
// It moves the state previously written by [asyncPreempt] into
// gp.m.p.xRegs.scratch to a long-lived xRegState block attached to gp,
// consuming the P's 1-element cache when available.
//
// This must run on the system stack because it assumes the P won't change.
//
//go:systemstack
func xRegSave(gp *g) {
	if gp.xRegs.state != nil {
		// Double preempt?
		throw("gp.xRegState.p != nil on async preempt")
	}

	// Get the place to save the register state.
	var dest *xRegState
	pp := gp.m.p.ptr()
	if pp.xRegs.cache != nil {
		// Use the cached allocation.
		dest = pp.xRegs.cache
		pp.xRegs.cache = nil
	} else {
		// Allocate a new save block.
		lock(&xRegAlloc.lock)
		dest = (*xRegState)(xRegAlloc.alloc.alloc())
		unlock(&xRegAlloc.lock)
	}

	// Copy state saved in the scratchpad to dest.
	//
	// If we ever need to save less state (e.g., avoid saving vector registers
	// that aren't in use), we could have multiple allocation pools for
	// different size states and copy only the registers we need.
	dest.regs = pp.xRegs.scratch

	// Save on the G. From here on the state travels with gp, so it survives
	// even if gp migrates to another P before it's restored.
	gp.xRegs.state = dest
}
   103  
// xRegRestore prepares the extended register state on gp to be restored.
//
// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find
// it. This means nothing else may use the cache between this call and the
// return to asyncPreempt. This is not quite symmetric with [xRegSave], which
// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy.
//
// This is called with asyncPreempt on the stack and thus must not grow the
// stack.
//
//go:nosplit
func xRegRestore(gp *g) {
	if gp.xRegs.state == nil {
		throw("gp.xRegState.p == nil on return from async preempt")
	}
	// If the P has a block cached on it, free that so we can replace it.
	pp := gp.m.p.ptr()
	if pp.xRegs.cache != nil {
		// Don't grow the G stack: run the free (which takes a lock) on the
		// system stack instead.
		systemstack(func() {
			pp.xRegs.free()
		})
	}
	// Hand gp's saved state to the P. asyncPreempt restores registers from
	// cache, after which the block is delay-freed or reused (see xRegPerP).
	pp.xRegs.cache = gp.xRegs.state
	gp.xRegs.state = nil
}
   130  
   131  func (xRegs *xRegPerP) free() {
   132  	if xRegs.cache != nil {
   133  		lock(&xRegAlloc.lock)
   134  		xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache))
   135  		xRegs.cache = nil
   136  		unlock(&xRegAlloc.lock)
   137  	}
   138  }
   139  
// xRegScan conservatively scans the extended register state saved on gp,
// marking any values that look like heap pointers.
//
// This is supposed to be called only by scanstack when it handles async preemption.
func xRegScan(gp *g, gcw *gcWork, state *stackScanState) {
	// Regular async preemption always provides the extended register state.
	if gp.xRegs.state == nil {
		// NOTE(review): a frame with FuncID_debugCallV2 on the stack appears
		// to mark a debug-call injection, which presumably enters this path
		// without saved extended register state — tolerated here; confirm
		// against the debug call implementation.
		var u unwinder
		for u.init(gp, 0); u.valid(); u.next() {
			if u.frame.fn.valid() && u.frame.fn.funcID == abi.FuncID_debugCallV2 {
				return
			}
		}
		// No debug-call frame found: missing state is a runtime bug.
		println("runtime: gp=", gp, ", goid=", gp.goid)
		throw("gp.xRegs.state == nil on a scanstack attempt during async preemption")
	}
	// Scan the whole saved register block as potential pointers; the block is
	// off-heap (NotInHeap), so the GC won't find it any other way.
	b := uintptr(unsafe.Pointer(&gp.xRegs.state.regs))
	n := uintptr(unsafe.Sizeof(gp.xRegs.state.regs))
	if debugScanConservative {
		print("begin scan xRegs of goroutine ", gp.goid, " at [", hex(b), ",", hex(b+n), ")\n")
	}
	scanConservative(b, n, nil, gcw, state)
	if debugScanConservative {
		print("end scan xRegs of goroutine ", gp.goid, "\n")
	}
}
   165  

View as plain text