threads: close shared memory with store, not child modules (#1939)

Signed-off-by: Anuraag Agrawal <anuraaga@gmail.com>

parent 7d5b6739da
commit 33ed8c488c
@@ -60,8 +60,8 @@ func ExampleCoreFeaturesThreads() {
 	// Channel to synchronize end of goroutines.
 	endCh := make(chan struct{})
 
-	// We start up 8 goroutines and run for 6000 iterations each. The count should reach
-	// 48000, at the end, but it would not if threads weren't working!
+	// We start up 8 goroutines and run for 100000 iterations each. The count should reach
+	// 800000, at the end, but it would not if threads weren't working!
 	for i := 0; i < 8; i++ {
 		go func() {
 			defer func() { endCh <- struct{}{} }()
@@ -71,7 +71,7 @@ func ExampleCoreFeaturesThreads() {
 			// among arbitrary goroutine invocations.
 			child := createChildModule(r, mod, wasmCompiled)
 			fn := child.mod.ExportedFunction("run")
-			for i := 0; i < 6000; i++ {
+			for i := 0; i < 100000; i++ {
 				_, err := fn.Call(ctx)
 				if err != nil {
 					log.Panicln(err)
@@ -92,7 +92,7 @@ func ExampleCoreFeaturesThreads() {
 		log.Panicln(err)
 	}
 	fmt.Println(res[0])
-	// Output: 48000
+	// Output: 800000
 }
 
 type childModule struct {
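
The example's expectation is simple arithmetic: 8 goroutines each calling the exported "run" function 100000 times against the same shared memory should leave the counter at 800000. Below is a minimal plain-Go sketch of the same fan-out/fan-in shape, with a Go atomic counter standing in for the counter kept in shared Wasm memory and no wazero involved.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var counter int64 // stands in for the counter kept in shared Wasm memory
	endCh := make(chan struct{})

	// 8 goroutines * 100000 atomic increments each should total 800000,
	// mirroring the expectation in ExampleCoreFeaturesThreads.
	for i := 0; i < 8; i++ {
		go func() {
			defer func() { endCh <- struct{}{} }()
			for j := 0; j < 100000; j++ {
				atomic.AddInt64(&counter, 1)
			}
		}()
	}
	for i := 0; i < 8; i++ {
		<-endCh
	}
	fmt.Println(counter) // 800000
}

The channel join mirrors the endCh pattern in the example; only the body of each goroutine differs.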
@@ -332,7 +332,6 @@ const (
 	// XCHGB is the XCHG instruction in 8-bit mode. https://www.felixcloutier.com/x86/xchg
 	XCHGB
-	// RET is the RET instruction. https://www.felixcloutier.com/x86/ret
 	// RET is the RET instruction. https://www.felixcloutier.com/x86/ret
 	RET
 	// JMP is the JMP instruction. https://www.felixcloutier.com/x86/jmp
 	JMP
@@ -1151,8 +1151,16 @@ func (ce *callEngine) builtinFunctionMemoryGrow(mem *wasm.MemoryInstance) {
 
 	// Update the moduleContext fields as they become stale after the update ^^.
 	bufSliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&mem.Buffer))
-	ce.moduleContext.memorySliceLen = uint64(bufSliceHeader.Len)
-	ce.moduleContext.memoryElement0Address = bufSliceHeader.Data
+	if mem.Shared {
+		// Use atomic to ensure visibility for good measure. Though in practice, we know
+		// the data address should never change for shared memory, and the length field
+		// in the context is ignored.
+		atomic.StoreUint64(&ce.moduleContext.memorySliceLen, uint64(bufSliceHeader.Len))
+		atomic.StoreUintptr(&ce.moduleContext.memoryElement0Address, bufSliceHeader.Data)
+	} else {
+		ce.moduleContext.memorySliceLen = uint64(bufSliceHeader.Len)
+		ce.moduleContext.memoryElement0Address = bufSliceHeader.Data
+	}
 }
 
 func (ce *callEngine) builtinFunctionTableGrow(tables []*wasm.TableInstance) {
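
The shared branch above publishes each refreshed field with sync/atomic rather than a plain assignment. Below is a minimal sketch of that publication pattern, using an invented moduleContext type whose field names only approximate wazero's internals. Each word is written atomically so a concurrent reader never observes a torn value; the pair as a whole is still not updated atomically, which the comment in the hunk says is tolerable because a shared memory's base address never changes and the length field in the context is ignored.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// moduleContext is a stand-in for the engine's per-call context; the real
// structure in wazero differs.
type moduleContext struct {
	memorySliceLen        uint64
	memoryElement0Address uintptr
}

// publish mirrors the shared-memory branch: each field is written with an
// atomic store so readers on other goroutines never see a half-written word.
func publish(ctx *moduleContext, buf []byte) {
	atomic.StoreUint64(&ctx.memorySliceLen, uint64(len(buf)))
	atomic.StoreUintptr(&ctx.memoryElement0Address, uintptr(unsafe.Pointer(&buf[0])))
}

func main() {
	ctx := &moduleContext{}
	mem := make([]byte, 65536) // one Wasm page
	publish(ctx, mem)
	fmt.Println(atomic.LoadUint64(&ctx.memorySliceLen)) // 65536
}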
@@ -259,6 +259,9 @@ func (m *MemoryInstance) Grow(delta uint32) (result uint32, ok bool) {
 	if newPages > m.Max {
 		return 0, false
 	} else if newPages > m.Cap { // grow the memory.
+		if m.Shared {
+			panic("shared memory cannot be grown, this is a bug in wazero")
+		}
 		m.Buffer = append(m.Buffer, make([]byte, MemoryPagesToBytesNum(delta))...)
 		m.Cap = newPages
 		return currentPages, true
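
The new panic documents an invariant rather than a path the engine expects to take: growing through append may reallocate the backing array and move its base address, which would invalidate the address other threads already hold into a shared memory. Here is a small sketch of why append past capacity is unsafe here; it assumes, as the guard implies, that a shared memory's buffer must never be reallocated after instantiation.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// A slice already at full capacity: append must allocate a new backing array.
	buf := make([]byte, 4, 4)
	before := uintptr(unsafe.Pointer(&buf[0]))

	buf = append(buf, make([]byte, 4)...)
	after := uintptr(unsafe.Pointer(&buf[0]))

	// The base address moved. Any other goroutine still using the old address
	// would silently read and write stale memory, which is exactly what the
	// panic in Grow rules out for shared memories.
	fmt.Println(before != after) // true
}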
@@ -123,6 +123,11 @@ func (m *ModuleInstance) closeWithExitCode(ctx context.Context, exitCode uint32)
 	if !m.setExitCode(exitCode, exitCodeFlagResourceClosed) {
 		return nil // not an error to have already closed
 	}
+	if mem := m.MemoryInstance; mem != nil {
+		if err = mem.Close(); err != nil {
+			return err
+		}
+	}
 	return m.ensureResourcesClosed(ctx)
 }
@@ -157,12 +162,6 @@ func (m *ModuleInstance) ensureResourcesClosed(ctx context.Context) (err error)
 		m.Sys = nil
 	}
 
-	if mem := m.MemoryInstance; mem != nil {
-		if err = mem.Close(); err != nil {
-			return err
-		}
-	}
-
 	if m.CodeCloser == nil {
 		return
 	}
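
Taken together, the last two hunks move mem.Close() from ensureResourcesClosed, which runs for every module instance, to closeWithExitCode, so that, per the commit title, the shared memory is released with the store-owned module rather than whenever a child module closes. A loose sketch of that ownership rule with invented types, not wazero's actual API:

package main

import "fmt"

// sharedMemory stands in for a Wasm shared memory referenced by several modules.
type sharedMemory struct{ closed bool }

func (m *sharedMemory) Close() { m.closed = true }

// module is a hypothetical holder: only the owner (the store-created module)
// releases the memory; child modules merely borrow it.
type module struct {
	mem   *sharedMemory
	owner bool
}

func (mod *module) close() {
	if mod.owner && mod.mem != nil {
		mod.mem.Close()
	}
}

func main() {
	mem := &sharedMemory{}
	parent := &module{mem: mem, owner: true}
	child := &module{mem: mem}

	child.close()
	fmt.Println(mem.closed) // false: closing a child leaves the shared memory alive
	parent.close()
	fmt.Println(mem.closed) // true: the store-owned module releases it
}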