// Extracted from the Go runtime: src/runtime/syscall_windows.go.
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/goarch"
10 "unsafe"
11 )
12
13
// cbs holds the global registry of compiled Go callbacks, mapping
// (function, calling-convention) keys to trampoline slots. At most
// cb_max callbacks can ever be registered; entries are never removed.
var cbs struct {
	lock  mutex                  // protects all fields below; use cbsLock/cbsUnlock
	ctxt  [cb_max]winCallback    // registered callbacks, indexed by trampoline slot
	index map[winCallbackKey]int // key -> slot; lazily allocated on first registration
	n     int                    // number of slots in use
}
20
// cbsLock acquires the callback-registry lock and, once the program is
// running normally, informs the race detector of the acquire.
func cbsLock() {
	lock(&cbs.lock)
	// NOTE(review): the race annotation is skipped before mainStarted —
	// presumably because the race runtime is not fully usable during
	// early runtime startup; confirm against the runtime init sequence.
	if raceenabled && mainStarted {
		raceacquire(unsafe.Pointer(&cbs.lock))
	}
}
31
// cbsUnlock releases the callback-registry lock, first informing the
// race detector of the release (mirroring cbsLock).
func cbsUnlock() {
	if raceenabled && mainStarted {
		racerelease(unsafe.Pointer(&cbs.lock))
	}
	unlock(&cbs.lock)
}
38
39
// winCallback records everything needed to run one registered Go
// callback from C.
type winCallback struct {
	fn     *funcval // the Go function to call
	retPop uintptr  // bytes of C stack the callee pops on return (386 cdecl only)
	abiMap abiDesc  // how to translate the C argument frame into a Go frame
}
45
46
// abiPartKind is the action an abiPart performs when translating a C
// callback frame to a Go frame.
type abiPartKind int

const (
	abiPartBad   abiPartKind = iota // zero value: invalid/uninitialized part
	abiPartStack                    // copy bytes from the C frame to the Go stack
	abiPartReg                      // copy bytes from the C frame to a Go integer register
)
54
55
// abiPart is one step in translating a C argument frame to a Go one:
// copy len bytes from the source (C) stack offset to either a Go stack
// offset or a Go integer register, depending on kind.
type abiPart struct {
	kind           abiPartKind
	srcStackOffset uintptr // byte offset into the C argument block
	dstStackOffset uintptr // byte offset into the Go frame (kind == abiPartStack)
	dstRegister    int     // Go integer argument register index (kind == abiPartReg)
	len            uintptr // number of bytes to copy
}
63
64 func (a *abiPart) tryMerge(b abiPart) bool {
65 if a.kind != abiPartStack || b.kind != abiPartStack {
66 return false
67 }
68 if a.srcStackOffset+a.len == b.srcStackOffset && a.dstStackOffset+a.len == b.dstStackOffset {
69 a.len += b.len
70 return true
71 }
72 return false
73 }
74
75
76
77
78
79
// abiDesc describes how to translate a C callback frame into a Go
// call frame: a sequence of copy steps plus bookkeeping about how much
// stack and register space each side uses. No reverse translation is
// described because the only supported result is a single uintptr.
type abiDesc struct {
	parts []abiPart // copy steps in argument order; adjacent stack copies are merged

	srcStackSize uintptr // C stack space consumed (one word per argument)
	dstStackSize uintptr // Go stack space used by stack-assigned arguments (and result, if stack-returned)
	dstSpill     uintptr // extra Go stack space reserved to spill register-assigned arguments
	dstRegisters int     // number of Go integer argument registers used

	// retOffset is the byte offset into the Go frame at which the
	// uintptr-sized result is stored when it is stack-assigned.
	retOffset uintptr
}
92
// assignArg accounts for one C-ABI argument of type t, appending
// translation steps to p and advancing the source/destination sizes.
// Only arguments that fit in a single machine word are supported;
// larger arguments, and floating-point arguments outside 386, panic.
func (p *abiDesc) assignArg(t *_type) {
	if t.Size_ > goarch.PtrSize {
		// Larger-than-word arguments (e.g. 64-bit values on 386, or
		// multi-word structs passed by value) would require multi-word
		// copies or by-reference passing, which this translator does
		// not implement.
		panic("compileCallback: argument size is larger than uintptr")
	}
	if k := t.Kind(); GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) {
		// Outside 386, the C convention passes floats in
		// floating-point registers, which the assembly trampoline
		// does not spill, so they cannot be recovered here.
		panic("compileCallback: float arguments not supported")
	}

	if t.Size_ == 0 {
		// Zero-sized argument: contributes only alignment on the Go
		// side; no bytes are copied. (The word-sized C slot below is
		// not consumed either — we return before reaching it.)
		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))
		return
	}

	// First try to register-assign the argument for the Go ABI.
	// Save the parts list so a partially recorded attempt (e.g. a
	// struct that ran out of registers midway) can be rolled back.
	oldParts := p.parts
	if p.tryRegAssignArg(t, 0) {
		// Register assignment succeeded: reserve the caller-provided
		// spill slot for this register argument in the Go frame.
		p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
		p.dstSpill += t.Size_
	} else {
		// Register assignment failed: undo any partial work and
		// fall back to passing the argument on the Go stack.
		p.parts = oldParts

		// The Go ABI aligns stack arguments.
		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))

		// Copy exactly t.Size_ bytes from the current C slot to the
		// current Go slot (sub-word values occupy the low-order
		// bytes of their word on these little-endian targets).
		part := abiPart{
			kind:           abiPartStack,
			srcStackOffset: p.srcStackSize,
			dstStackOffset: p.dstStackSize,
			len:            t.Size_,
		}
		// Coalesce with the previous stack copy when contiguous;
		// otherwise record a new step.
		if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) {
			p.parts = append(p.parts, part)
		}

		// The Go ABI packs stack arguments tightly.
		p.dstStackSize += t.Size_
	}

	// The C side always consumes one full word per argument,
	// regardless of the argument's actual size.
	p.srcStackSize += goarch.PtrSize
}
164
165
166
167
168
169
170
// tryRegAssignArg tries to register-assign a value of type t (or a
// component of an aggregate argument) for the Go ABI. offset is the
// byte position of this value within its outermost enclosing argument.
// Assumes t.Size_ <= goarch.PtrSize and t.Size_ != 0 (checked by
// assignArg).
//
// It reports whether assignment succeeded, and panics for type kinds
// the callback translator does not support at all.
func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool {
	switch k := t.Kind(); k {
	case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uintptr, abi.Pointer, abi.UnsafePointer:
		// Integer- and pointer-shaped values take one register.
		return p.assignReg(t.Size_, offset)
	case abi.Int64, abi.Uint64:
		// 64-bit integers fit in a register only on 64-bit targets;
		// on 32-bit targets control falls through to the panic below.
		if goarch.PtrSize == 8 {
			return p.assignReg(t.Size_, offset)
		}
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		// A one-element array is equivalent to its element; any
		// other length falls through to the panic below.
		if at.Len == 1 {
			return p.tryRegAssignArg(at.Elem, offset)
		}
	case abi.Struct:
		// Register-assign every field. Any failure (registers
		// exhausted) fails the whole struct so the caller can roll
		// back and stack-assign it instead.
		st := (*structtype)(unsafe.Pointer(t))
		for i := range st.Fields {
			f := &st.Fields[i]
			if !p.tryRegAssignArg(f.Typ, offset+f.Offset) {
				return false
			}
		}
		return true
	}
	// Any other kind is unsupported (this includes word-shaped types
	// such as maps and channels, and floats on 386, which are not
	// rejected by assignArg's earlier check).
	panic("compileCallback: type " + toRType(t).string() + " is currently not supported for use in system callbacks")
}
200
201
202
203
204
205
206 func (p *abiDesc) assignReg(size, offset uintptr) bool {
207 if p.dstRegisters >= intArgRegs {
208 return false
209 }
210 p.parts = append(p.parts, abiPart{
211 kind: abiPartReg,
212 srcStackOffset: p.srcStackSize + offset,
213 dstRegister: p.dstRegisters,
214 len: size,
215 })
216 p.dstRegisters++
217 return true
218 }
219
// winCallbackKey identifies a registered callback: the Go function
// plus whether it uses the cdecl convention (meaningful on 386 only).
type winCallbackKey struct {
	fn    *funcval
	cdecl bool
}
224
225 func callbackasm()
226
227
228
229
230
231
232
233
234
235
236 func callbackasmAddr(i int) uintptr {
237 var entrySize int
238 switch GOARCH {
239 default:
240 panic("unsupported architecture")
241 case "386", "amd64":
242 entrySize = 5
243 case "arm64":
244
245
246 entrySize = 8
247 }
248 return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
249 }
250
251 const callbackMaxFrame = 64 * goarch.PtrSize
252
253
254
255
256
257
258
259
260
// compileCallback converts the Go function fn into a C-callable
// function pointer, registering it in the global cbs table (at most
// cb_max distinct callbacks, ever). fn must take only word-sized,
// non-float arguments and return exactly one uintptr-sized result.
// On 386, cdecl selects the cdecl convention instead of stdcall;
// on other architectures it is ignored.
func compileCallback(fn eface, cdecl bool) (code uintptr) {
	if GOARCH != "386" {
		// cdecl is only meaningful on 386.
		cdecl = false
	}

	if fn._type == nil || fn._type.Kind() != abi.Func {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	ft := (*functype)(unsafe.Pointer(fn._type))

	// Check the arguments and build the C-to-Go ABI translation.
	var abiMap abiDesc
	for _, t := range ft.InSlice() {
		abiMap.assignArg(t)
	}

	// The Go ABI aligns the result to the word size; the C side is
	// already word-aligned.
	abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize)
	abiMap.retOffset = abiMap.dstStackSize

	if len(ft.OutSlice()) != 1 {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if ft.OutSlice()[0].Size_ != goarch.PtrSize {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if k := ft.OutSlice()[0].Kind(); k == abi.Float32 || k == abi.Float64 {
		// Float results are returned in floating-point registers on
		// the C side, which the trampoline does not handle.
		panic("compileCallback: float results not supported")
	}
	if intArgRegs == 0 {
		// No Go argument registers: reserve a stack word for the
		// result. Otherwise the result comes back in a register and
		// needs no stack space (see callbackWrap's retOffset check).
		abiMap.dstStackSize += goarch.PtrSize
	}

	// Compute the total Go frame size, including spill space for
	// register-assigned arguments, and enforce the fixed-size frame
	// buffer limit used by callbackWrap.
	frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize)
	frameSize += abiMap.dstSpill
	if frameSize > callbackMaxFrame {
		panic("compileCallback: function argument frame too large")
	}

	// Under cdecl the callee pops its own arguments; record how many
	// bytes of C stack that is.
	var retPop uintptr
	if cdecl {
		retPop = abiMap.srcStackSize
	}

	key := winCallbackKey{(*funcval)(fn.data), cdecl}

	cbsLock()

	// Reuse the existing slot if this (fn, cdecl) pair was already
	// registered.
	if n, ok := cbs.index[key]; ok {
		cbsUnlock()
		return callbackasmAddr(n)
	}

	// Register a new callback in the next free trampoline slot.
	if cbs.index == nil {
		cbs.index = make(map[winCallbackKey]int)
	}
	n := cbs.n
	if n >= len(cbs.ctxt) {
		cbsUnlock()
		throw("too many callback functions")
	}
	c := winCallback{key.fn, retPop, abiMap}
	cbs.ctxt[n] = c
	cbs.index[key] = n
	cbs.n++

	cbsUnlock()
	return callbackasmAddr(n)
}
343
// callbackArgs is the argument block passed from the assembly
// trampoline to callbackWrap.
// NOTE(review): field order/offsets are assumed to be kept in sync
// with the trampoline assembly — confirm against the .s file before
// changing this struct.
type callbackArgs struct {
	index uintptr // which registered callback to run (index into cbs.ctxt)

	// args points at the C argument block. abiDesc indexes it with a
	// single srcStackOffset, so the trampoline is assumed to arrange
	// register-passed C arguments contiguously with the stack
	// arguments — presumably by spilling them adjacent to the stack
	// area; verify in the assembly.
	args unsafe.Pointer

	// Out-parameters filled in by callbackWrap:
	result uintptr // value returned to the C caller
	retPop uintptr // bytes of C stack to pop on return (386 cdecl only)
}
358
359
// callbackWrap runs registered callback a.index with the C arguments
// at a.args, storing the uintptr result into a.result and the
// bytes-to-pop count into a.retPop. It is invoked on behalf of the
// assembly trampoline.
func callbackWrap(a *callbackArgs) {
	c := cbs.ctxt[a.index]
	a.retPop = c.retPop

	// Translate the C argument block into a Go argument frame and
	// register set, following the precomputed ABI description.
	var regs abi.RegArgs
	var frame [callbackMaxFrame]byte
	goArgs := unsafe.Pointer(&frame)
	for _, part := range c.abiMap.parts {
		switch part.kind {
		case abiPartStack:
			memmove(add(goArgs, part.dstStackOffset), add(a.args, part.srcStackOffset), part.len)
		case abiPartReg:
			goReg := unsafe.Pointer(&regs.Ints[part.dstRegister])
			memmove(goReg, add(a.args, part.srcStackOffset), part.len)
		default:
			panic("bad ABI description")
		}
	}

	// The Go frame must also cover the spill space reserved for
	// register-assigned arguments (see abiDesc.dstSpill).
	frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize)
	frameSize += c.abiMap.dstSpill

	// Call the Go function via reflectcall. The nil type means
	// results are copied back without write barriers, which is safe
	// here because the only result is a plain uintptr.
	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.abiMap.dstStackSize), uint32(c.abiMap.retOffset), uint32(frameSize), &regs)

	// Extract the single word-sized result. compileCallback reserved
	// stack space past retOffset only when the result is
	// stack-returned, so dstStackSize != retOffset distinguishes the
	// two cases; otherwise the result is in the first integer register.
	if c.abiMap.dstStackSize != c.abiMap.retOffset {
		a.result = *(*uintptr)(unsafe.Pointer(&frame[c.abiMap.retOffset]))
	} else {
		var zero int
		// NOTE(review): zero is always 0; indexing through a
		// variable rather than the literal 0 looks deliberate —
		// confirm intent before simplifying to regs.Ints[0].
		a.result = regs.Ints[zero]
	}
}
404
405
406
407
408
409
410
411
412
// syscall_syscalln performs a Windows system call: it invokes fn with
// the first n values of args and returns the two result values plus
// the error code (a 32-bit errno widened to uintptr). It panics if n
// exceeds len(args).
func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
	if n > uintptr(len(args)) {
		panic("syscall: n > len(args)") // guard against malformed callers
	}

	// The call parameters are staged in the M's winsyscall block
	// rather than on the goroutine stack — presumably because the
	// stack can move if fn calls back into Go; confirm against the
	// asmstdcall assembly.
	c := &getg().m.winsyscall
	c.Fn = fn
	c.N = n
	if c.N != 0 {
		c.Args = uintptr(noescape(unsafe.Pointer(&args[0])))
	}
	errno := cgocall(asmstdcallAddr, unsafe.Pointer(c))

	// Re-derive the winsyscall block: the goroutine may be running
	// on a different M after cgocall returns, and that M's block
	// holds the results.
	c = &getg().m.winsyscall
	return c.R1, c.R2, uintptr(uint32(errno))
}
434
// (end of extracted source)