diff --git a/.github/workflows/commit.yaml b/.github/workflows/commit.yaml index 2a2722ad..41b954de 100644 --- a/.github/workflows/commit.yaml +++ b/.github/workflows/commit.yaml @@ -47,11 +47,4 @@ jobs: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - - name: Install TinyGo - run: | - wget https://github.com/tinygo-org/tinygo/releases/download/v0.20.0/tinygo_0.20.0_amd64.deb - sudo dpkg -i tinygo_0.20.0_amd64.deb - - - run: make build.examples - - run: make test diff --git a/README.md b/README.md index cd4943d1..77fcb8c2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Gasm -A minimal implementation of Wasm Virtual machine purely written in Go. The VM passes all the [Wasm Spec test suites](https://github.com/WebAssembly/spec/tree/master/test/core) and is fully compatible Wasm v1.0 Specification. +A minimal implementation of Wasm Virtual Machine purely written in Go. The VM passes all the [Wasm Spec test suites](https://github.com/WebAssembly/spec/tree/wg-1.0/test/core) and is fully compatible with the WebAssembly v1.0 Specification. The VM can be embedded in your Go program without any dependency like cgo, and enables Gophers to write Wasm host environments easily. 
diff --git a/wasm/const_expr.go b/wasm/const_expr.go index 9810c8c7..d094c6bc 100644 --- a/wasm/const_expr.go +++ b/wasm/const_expr.go @@ -15,52 +15,58 @@ type ConstantExpression struct { Data []byte } -func (s *Store) executeConstExpression(target *ModuleInstance, expr *ConstantExpression) (v interface{}, err error) { +func (s *Store) executeConstExpression(target *ModuleInstance, expr *ConstantExpression) (v interface{}, valueType ValueType, err error) { r := bytes.NewBuffer(expr.Data) switch expr.OptCode { case OptCodeI32Const: v, _, err = leb128.DecodeInt32(r) if err != nil { - return nil, fmt.Errorf("read uint32: %w", err) + return nil, 0, fmt.Errorf("read uint32: %w", err) } + return v, ValueTypeI32, nil case OptCodeI64Const: v, _, err = leb128.DecodeInt32(r) if err != nil { - return nil, fmt.Errorf("read uint64: %w", err) + return nil, 0, fmt.Errorf("read uint64: %w", err) } + return v, ValueTypeI64, nil case OptCodeF32Const: v, err = readFloat32(r) if err != nil { - return nil, fmt.Errorf("read f34: %w", err) + return nil, 0, fmt.Errorf("read f34: %w", err) } + return v, ValueTypeF32, nil case OptCodeF64Const: v, err = readFloat64(r) if err != nil { - return nil, fmt.Errorf("read f64: %w", err) + return nil, 0, fmt.Errorf("read f64: %w", err) } + return v, ValueTypeF64, nil case OptCodeGlobalGet: id, _, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("read index of global: %w", err) + return nil, 0, fmt.Errorf("read index of global: %w", err) } if uint32(len(target.GlobalsAddrs)) <= id { - return nil, fmt.Errorf("global index out of range") + return nil, 0, fmt.Errorf("global index out of range") } g := s.Globals[target.GlobalsAddrs[id]] switch g.Type.ValType { case ValueTypeI32: v = int32(g.Val) + return v, ValueTypeI32, nil case ValueTypeI64: v = int64(g.Val) + return v, ValueTypeI64, nil case ValueTypeF32: v = math.Float32frombits(uint32(g.Val)) + return v, ValueTypeF32, nil case ValueTypeF64: v = 
math.Float64frombits(uint64(g.Val)) + return v, ValueTypeF64, nil } - default: - return nil, fmt.Errorf("invalid opt code: %#x", expr.OptCode) } - return v, nil + return nil, 0, fmt.Errorf("invalid opt code: %#x", expr.OptCode) } func readConstantExpression(r io.Reader) (*ConstantExpression, error) { diff --git a/wasm/optcode.go b/wasm/optcode.go index 6057a75a..da787c42 100644 --- a/wasm/optcode.go +++ b/wasm/optcode.go @@ -56,12 +56,13 @@ const ( OptCodeMemorySize OptCode = 0x3f OptCodeMemoryGrow OptCode = 0x40 - // numeric instruction + // const instructions. OptCodeI32Const OptCode = 0x41 OptCodeI64Const OptCode = 0x42 OptCodeF32Const OptCode = 0x43 OptCodeF64Const OptCode = 0x44 + // numeric instructions. OptCodeI32eqz OptCode = 0x45 OptCodeI32eq OptCode = 0x46 OptCodeI32ne OptCode = 0x47 diff --git a/wasm/section.go b/wasm/section.go index 070bb49f..646fc090 100644 --- a/wasm/section.go +++ b/wasm/section.go @@ -70,7 +70,7 @@ func (m *Module) readSections(r *Reader) error { err = errors.New("invalid section id") } - if sectionContentStart+int(ss) != r.read { + if err == nil && sectionContentStart+int(ss) != r.read { err = fmt.Errorf("invalid section length: expected to be %d but got %d", ss, r.read-sectionContentStart) } @@ -149,7 +149,7 @@ func (m *Module) readSectionFunctions(r *Reader) error { for i := range m.FunctionSection { m.FunctionSection[i], _, err = leb128.DecodeUint32(r) if err != nil { - return fmt.Errorf("get typeidx: %v", err) + return fmt.Errorf("get type index: %v", err) } } return nil @@ -215,7 +215,9 @@ func (m *Module) readSectionExports(r *Reader) error { if err != nil { return fmt.Errorf("read export: %v", err) } - + if _, ok := m.ExportSection[expDesc.Name]; ok { + return fmt.Errorf("duplicate export name: %s", expDesc.Name) + } m.ExportSection[expDesc.Name] = expDesc } return nil @@ -257,7 +259,7 @@ func (m *Module) readSectionCodes(r *Reader) error { for i := range m.CodeSection { m.CodeSection[i], err = readCodeSegment(r) if err != 
nil { - return fmt.Errorf("read code segment: %v", err) + return fmt.Errorf("read %d-th code segment: %v", i, err) } } return nil diff --git a/wasm/segment.go b/wasm/segment.go index 5affa129..560e4f91 100644 --- a/wasm/segment.go +++ b/wasm/segment.go @@ -3,12 +3,18 @@ package wasm import ( "fmt" "io" - "io/ioutil" "math" "github.com/mathetake/gasm/wasm/leb128" ) +const ( + ImportKindFunction = 0x00 + ImportKindTable = 0x01 + ImportKindMemory = 0x02 + ImportKindGlobal = 0x03 +) + type ImportDesc struct { Kind byte @@ -25,41 +31,40 @@ func readImportDesc(r io.Reader) (*ImportDesc, error) { } switch b[0] { - case 0x00: + case ImportKindFunction: tID, _, err := leb128.DecodeUint32(r) if err != nil { return nil, fmt.Errorf("read typeindex: %v", err) } return &ImportDesc{ - Kind: 0x00, + Kind: ImportKindFunction, TypeIndexPtr: &tID, }, nil - case 0x01: + case ImportKindTable: tt, err := readTableType(r) if err != nil { return nil, fmt.Errorf("read table type: %v", err) } return &ImportDesc{ - Kind: 0x01, + Kind: ImportKindTable, TableTypePtr: tt, }, nil - case 0x02: + case ImportKindMemory: mt, err := readMemoryType(r) if err != nil { return nil, fmt.Errorf("read table type: %v", err) } return &ImportDesc{ - Kind: 0x02, + Kind: ImportKindMemory, MemTypePtr: mt, }, nil - case 0x03: + case ImportKindGlobal: gt, err := readGlobalType(r) if err != nil { return nil, fmt.Errorf("read global type: %v", err) } - return &ImportDesc{ - Kind: 0x03, + Kind: ImportKindGlobal, GlobalTypePtr: gt, }, nil default: @@ -206,8 +211,9 @@ func readElementSegment(r io.Reader) (*ElementSegment, error) { } type CodeSegment struct { - NumLocals uint32 - Body []byte + NumLocals uint32 + LocalTypes []ValueType + Body []byte } func readCodeSegment(r io.Reader) (*CodeSegment, error) { @@ -221,29 +227,46 @@ func readCodeSegment(r io.Reader) (*CodeSegment, error) { // parse locals ls, _, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("get the size locals: %w", err) + return 
nil, fmt.Errorf("get the size locals: %v", err) } - var numLocals uint64 + var nums []uint64 + var types []ValueType + var sum uint64 b := make([]byte, 1) for i := uint32(0); i < ls; i++ { n, _, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("read n of locals: %w", err) + return nil, fmt.Errorf("read n of locals: %v", err) } - numLocals += uint64(n) + sum += uint64(n) + nums = append(nums, uint64(n)) - if _, err := io.ReadFull(r, b); err != nil { - return nil, fmt.Errorf("read type of local") + _, err = io.ReadFull(r, b) + if err != nil { + return nil, fmt.Errorf("read type of local: %v", err) + } + switch vt := ValueType(b[0]); vt { + case ValueTypeI32, ValueTypeF32, ValueTypeI64, ValueTypeF64: + types = append(types, vt) + default: + return nil, fmt.Errorf("invalid local type: 0x%x", vt) } } - if numLocals > math.MaxUint32 { - return nil, fmt.Errorf("too many locals: %d", numLocals) + if sum > math.MaxUint32 { + return nil, fmt.Errorf("too many locals: %d", sum) } - // extract body - body, err := ioutil.ReadAll(r) + var localTypes []ValueType + for i, num := range nums { + t := types[i] + for j := uint64(0); j < num; j++ { + localTypes = append(localTypes, t) + } + } + + body, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("read body: %w", err) } @@ -253,8 +276,9 @@ func readCodeSegment(r io.Reader) (*CodeSegment, error) { } return &CodeSegment{ - Body: body[:len(body)-1], - NumLocals: uint32(numLocals), + Body: body, + NumLocals: uint32(sum), + LocalTypes: localTypes, }, nil } diff --git a/wasm/segment_test.go b/wasm/segment_test.go deleted file mode 100644 index 6c470cb2..00000000 --- a/wasm/segment_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package wasm - -import ( - "bytes" - "errors" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestReadImportDesc(t *testing.T) { - t.Run("ng", func(t *testing.T) { - buf := []byte{0x04} - _, err := 
readImportDesc(bytes.NewBuffer(buf)) - require.True(t, errors.Is(err, ErrInvalidByte)) - t.Log(err) - }) - - for i, c := range []struct { - bytes []byte - exp *ImportDesc - }{ - { - bytes: []byte{0x00, 0x0a}, - exp: &ImportDesc{ - Kind: 0, - TypeIndexPtr: uint32Ptr(10), - }, - }, - { - bytes: []byte{0x01, 0x70, 0x0, 0x0a}, - exp: &ImportDesc{ - Kind: 1, - TableTypePtr: &TableType{ - ElemType: 0x70, - Limit: &LimitsType{Min: 10}, - }, - }, - }, - { - bytes: []byte{0x02, 0x0, 0x0a}, - exp: &ImportDesc{ - Kind: 2, - MemTypePtr: &MemoryType{Min: 10}, - }, - }, - { - bytes: []byte{0x03, 0x7e, 0x01}, - exp: &ImportDesc{ - Kind: 3, - GlobalTypePtr: &GlobalType{ValType: ValueTypeI64, Mutable: true}, - }, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readImportDesc(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - - } -} - -func TestReadImportSegment(t *testing.T) { - exp := &ImportSegment{ - Module: "abc", - Name: "ABC", - Desc: &ImportDesc{Kind: 0, TypeIndexPtr: uint32Ptr(10)}, - } - - buf := []byte{byte(len(exp.Module))} - buf = append(buf, exp.Module...) - buf = append(buf, byte(len(exp.Name))) - buf = append(buf, exp.Name...) 
- buf = append(buf, 0x00, 0x0a) - - actual, err := readImportSegment(bytes.NewBuffer(buf)) - require.NoError(t, err) - assert.Equal(t, exp, actual) -} - -func TestReadGlobalSegment(t *testing.T) { - exp := &GlobalSegment{ - Type: &GlobalType{ValType: ValueTypeI64, Mutable: false}, - Init: &ConstantExpression{ - OptCode: OptCodeI64Const, - Data: []byte{0x01}, - }, - } - - buf := []byte{0x7e, 0x00, 0x42, 0x01, 0x0b} - actual, err := readGlobalSegment(bytes.NewBuffer(buf)) - require.NoError(t, err) - assert.Equal(t, exp, actual) -} - -func TestReadExportDesc(t *testing.T) { - t.Run("ng", func(t *testing.T) { - buf := []byte{0x04} - _, err := readExportDesc(bytes.NewBuffer(buf)) - require.True(t, errors.Is(err, ErrInvalidByte)) - t.Log(err) - }) - - for i, c := range []struct { - bytes []byte - exp *ExportDesc - }{ - { - bytes: []byte{0x00, 0x0a}, - exp: &ExportDesc{Kind: 0, Index: 10}, - }, - { - bytes: []byte{0x01, 0x05}, - exp: &ExportDesc{Kind: 1, Index: 5}, - }, - { - bytes: []byte{0x02, 0x01}, - exp: &ExportDesc{Kind: 2, Index: 1}, - }, - { - bytes: []byte{0x03, 0x0b}, - exp: &ExportDesc{Kind: 3, Index: 11}, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readExportDesc(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - - } -} - -func TestReadExportSegment(t *testing.T) { - exp := &ExportSegment{ - Name: "ABC", - Desc: &ExportDesc{Kind: 0, Index: 10}, - } - - buf := []byte{byte(len(exp.Name))} - buf = append(buf, exp.Name...) 
- buf = append(buf, 0x00, 0x0a) - - actual, err := readExportSegment(bytes.NewBuffer(buf)) - require.NoError(t, err) - assert.Equal(t, exp, actual) -} - -func TestReadElementSegment(t *testing.T) { - for i, c := range []struct { - bytes []byte - exp *ElementSegment - }{ - { - bytes: []byte{0xa, 0x41, 0x1, 0x0b, 0x02, 0x05, 0x07}, - exp: &ElementSegment{ - TableIndex: 10, - OffsetExpr: &ConstantExpression{ - OptCode: OptCodeI32Const, - Data: []byte{0x01}, - }, - Init: []uint32{5, 7}, - }, - }, - { - bytes: []byte{0x3, 0x41, 0x04, 0x0b, 0x01, 0x0a}, - exp: &ElementSegment{ - TableIndex: 3, - OffsetExpr: &ConstantExpression{ - OptCode: OptCodeI32Const, - Data: []byte{0x04}, - }, - Init: []uint32{10}, - }, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readElementSegment(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} - -func TestReadCodeSegment(t *testing.T) { - buf := []byte{0x9, 0x1, 0x1, 0x1, 0x1, 0x1, 0x12, 0x3, 0x01, 0x0b} - exp := &CodeSegment{ - NumLocals: 0x01, - Body: []byte{0x1, 0x1, 0x12, 0x3, 0x01}, - } - actual, err := readCodeSegment(bytes.NewBuffer(buf)) - require.NoError(t, err) - assert.Equal(t, exp, actual) -} - -func TestDataSegment(t *testing.T) { - for i, c := range []struct { - bytes []byte - exp *DataSegment - }{ - { - bytes: []byte{0x0, 0x41, 0x1, 0x0b, 0x02, 0x05, 0x07}, - exp: &DataSegment{ - OffsetExpression: &ConstantExpression{ - OptCode: OptCodeI32Const, - Data: []byte{0x01}, - }, - Init: []byte{5, 7}, - }, - }, - { - bytes: []byte{0x0, 0x41, 0x04, 0x0b, 0x01, 0x0a}, - exp: &DataSegment{ - OffsetExpression: &ConstantExpression{ - OptCode: OptCodeI32Const, - Data: []byte{0x04}, - }, - Init: []byte{0x0a}, - }, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readDataSegment(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} diff --git a/wasm/spectests/spec_test.go b/wasm/spectests/spec_test.go 
index d64c1e2b..b04399f9 100644 --- a/wasm/spectests/spec_test.go +++ b/wasm/spectests/spec_test.go @@ -202,8 +202,8 @@ func TestSpecification(t *testing.T) { var base testbase require.NoError(t, json.Unmarshal(raw, &base)) - wastName := filepath.Base(base.SourceFile) + t.Run(wastName, func(t *testing.T) { vm, err := wasm.NewVM() require.NoError(t, err) @@ -317,7 +317,17 @@ func TestSpecification(t *testing.T) { t.Fatalf("unsupported action type type: %v", c) } case "assert_invalid": - // TODO: + if c.ModuleType == "text" { + // We don't support direct loading of wast yet. + t.Skip() + } + buf, err := os.ReadFile(filepath.Join(caseDir, c.Filename)) + require.NoError(t, err, msg) + mod, err := wasm.DecodeModule(buf) + if err == nil { + err = vm.InstantiateModule(mod, "") + } + require.Error(t, err, msg) case "assert_exhaustion": // TODO: case "assert_unlinkable": diff --git a/wasm/store.go b/wasm/store.go index 1d9f7868..e67624b1 100644 --- a/wasm/store.go +++ b/wasm/store.go @@ -6,6 +6,7 @@ import ( "io" "math" "reflect" + "strings" "github.com/mathetake/gasm/wasm/leb128" ) @@ -64,30 +65,39 @@ func NewStore() *Store { } func (s *Store) Instantiate(module *Module, name string) (*ModuleInstance, error) { - ret := &ModuleInstance{Types: module.TypeSection} - s.ModuleInstances[name] = ret + instance := &ModuleInstance{Types: module.TypeSection} + s.ModuleInstances[name] = instance - if err := s.resolveImports(module, ret); err != nil { + if err := s.resolveImports(module, instance); err != nil { return nil, fmt.Errorf("resolve imports: %w", err) } - if err := s.buildGlobalInstances(module, ret); err != nil { + if err := s.buildGlobalInstances(module, instance); err != nil { return nil, fmt.Errorf("globals: %w", err) } - if err := s.buildFunctionInstances(module, ret); err != nil { + if err := s.buildFunctionInstances(module, instance); err != nil { return nil, fmt.Errorf("functions: %w", err) } - if err := s.buildTableInstances(module, ret); err != nil { + if err := 
s.buildTableInstances(module, instance); err != nil { return nil, fmt.Errorf("tables: %w", err) } - if err := s.buildMemoryInstances(module, ret); err != nil { + if err := s.buildMemoryInstances(module, instance); err != nil { return nil, fmt.Errorf("memories: %w", err) } - if err := s.buildExportInstances(module, ret); err != nil { + if err := s.buildExportInstances(module, instance); err != nil { return nil, fmt.Errorf("exports: %w", err) } - // TODO: Execute start func - return ret, nil + if module.StartSection != nil { + index := *module.StartSection + if int(index) >= len(instance.FunctionAddrs) { + return nil, fmt.Errorf("invalid start function index: %d", index) + } + signature := s.Functions[instance.FunctionAddrs[index]].FunctionType() + if len(signature.InputTypes) != 0 || len(signature.ReturnTypes) != 0 { + return nil, fmt.Errorf("start function must have the empty signature") + } + } + return instance, nil } func (s *Store) resolveImports(module *Module, target *ModuleInstance) error { @@ -139,10 +149,13 @@ func (s *Store) resolveImport(target *ModuleInstance, is *ImportSegment) error { func (s *Store) applyFunctionImport(target *ModuleInstance, is *ImportSegment, externModuleExportIsntance *ExportInstance) error { if is.Desc.TypeIndexPtr == nil { - return fmt.Errorf("is.Desc.TypeIndexPtr is nill") + return fmt.Errorf("is.Desc.TypeIndexPtr is nil") } - f := s.Functions[externModuleExportIsntance.Addr] + typeIndex := *is.Desc.TypeIndexPtr + if int(typeIndex) >= len(target.Types) { + return fmt.Errorf("unknown type for function import") + } iSig := target.Types[*is.Desc.TypeIndexPtr] if !hasSameSignature(iSig.ReturnTypes, f.FunctionType().ReturnTypes) { return fmt.Errorf("return signature mimatch: %#x != %#x", iSig.ReturnTypes, f.FunctionType().ReturnTypes) @@ -174,10 +187,13 @@ func (s *Store) applyGlobalImport(target *ModuleInstance, externModuleExportIsnt func (s *Store) buildGlobalInstances(module *Module, target *ModuleInstance) error { for _, gs := 
range module.GlobalSection { - raw, err := s.executeConstExpression(target, gs.Init) + raw, t, err := s.executeConstExpression(target, gs.Init) if err != nil { return fmt.Errorf("execution failed: %w", err) } + if gs.Type.ValType != t { + return fmt.Errorf("global type mismatch") + } var gv uint64 switch v := raw.(type) { case int32: @@ -199,6 +215,29 @@ func (s *Store) buildGlobalInstances(module *Module, target *ModuleInstance) err } func (s *Store) buildFunctionInstances(module *Module, target *ModuleInstance) error { + var functionDeclarations []uint32 + var globalDecalarations []*GlobalType + var memoryDeclarations []*MemoryType + var tableDeclarations []*TableType + for _, imp := range module.ImportSection { + switch imp.Desc.Kind { + case ImportKindFunction: + functionDeclarations = append(functionDeclarations, *imp.Desc.TypeIndexPtr) + case ImportKindGlobal: + globalDecalarations = append(globalDecalarations, imp.Desc.GlobalTypePtr) + case ImportKindMemory: + memoryDeclarations = append(memoryDeclarations, imp.Desc.MemTypePtr) + case ImportKindTable: + tableDeclarations = append(tableDeclarations, imp.Desc.TableTypePtr) + } + } + functionDeclarations = append(functionDeclarations, module.FunctionSection...) + for _, g := range module.GlobalSection { + globalDecalarations = append(globalDecalarations, g.Type) + } + memoryDeclarations = append(memoryDeclarations, module.MemorySection...) + tableDeclarations = append(tableDeclarations, module.TableSection...) 
+ for codeIndex, typeIndex := range module.FunctionSection { if typeIndex >= uint32(len(module.TypeSection)) { return fmt.Errorf("function type index out of range") @@ -209,15 +248,19 @@ func (s *Store) buildFunctionInstances(module *Module, target *ModuleInstance) e f := &NativeFunction{ Signature: module.TypeSection[typeIndex], Body: module.CodeSection[codeIndex].Body, - NumLocal: module.CodeSection[codeIndex].NumLocals, + NumLocals: module.CodeSection[codeIndex].NumLocals, + LocalTypes: module.CodeSection[codeIndex].LocalTypes, ModuleInstance: target, + Blocks: map[uint64]*NativeFunctionBlock{}, } - blocks, err := parseBlocks(module, f.Body) + err := analyzeFunction( + module, f, functionDeclarations, globalDecalarations, + memoryDeclarations, tableDeclarations, + ) if err != nil { - return fmt.Errorf("parse blocks in function index %d: %w", codeIndex, err) + return fmt.Errorf("invalid function at index %d/%d: %v", codeIndex, len(module.FunctionSection), err) } - f.Blocks = blocks target.FunctionAddrs = append(target.FunctionAddrs, len(s.Functions)) s.Functions = append(s.Functions, f) } @@ -242,14 +285,16 @@ func (s *Store) buildMemoryInstances(module *Module, target *ModuleInstance) err return fmt.Errorf("index out of range of index space") } - rawOffset, err := s.executeConstExpression(target, d.OffsetExpression) + rawOffset, offsetType, err := s.executeConstExpression(target, d.OffsetExpression) if err != nil { return fmt.Errorf("calculate offset: %w", err) + } else if offsetType != ValueTypeI32 { + return fmt.Errorf("offset is not int32 but %T", offsetType) } offset, ok := rawOffset.(int32) if !ok { - return fmt.Errorf("offset is not int32 but %T", rawOffset) + return fmt.Errorf("offset is not int32 but 0x%x", offsetType) } size := uint64(offset) + uint64(len(d.Init)) @@ -271,6 +316,9 @@ func (s *Store) buildMemoryInstances(module *Module, target *ModuleInstance) err copy(memoryInst.Memory[offset:], d.Init) } } + if len(target.MemoryAddrs) > 1 { + return 
fmt.Errorf("multiple memories not supported") + } return nil } @@ -293,14 +341,16 @@ func (s *Store) buildTableInstances(module *Module, target *ModuleInstance) erro return fmt.Errorf("index out of range of index space") } - rawOffset, err := s.executeConstExpression(target, elem.OffsetExpr) + rawOffset, offsetType, err := s.executeConstExpression(target, elem.OffsetExpr) if err != nil { return fmt.Errorf("calculate offset: %w", err) + } else if offsetType != ValueTypeI32 { + return fmt.Errorf("offset is not int32 but %T", offsetType) } offset32, ok := rawOffset.(int32) if !ok { - return fmt.Errorf("offset is not int32 but %T", rawOffset) + return fmt.Errorf("offset is not int32 but %T", offsetType) } offset := int(offset32) @@ -320,17 +370,28 @@ func (s *Store) buildTableInstances(module *Module, target *ModuleInstance) erro next := make([]*uint32, size) copy(next, tableInst.Table) for i := range elem.Init { - addr := uint32(target.FunctionAddrs[elem.Init[i]]) + elm := elem.Init[i] + if elm >= uint32(len(target.FunctionAddrs)) { + return fmt.Errorf("unknown function specified by element") + } + addr := uint32(target.FunctionAddrs[elm]) next[i+offset] = &addr } tableInst.Table = next } else { for i := range elem.Init { - addr := uint32(target.FunctionAddrs[elem.Init[i]]) + elm := elem.Init[i] + if elm >= uint32(len(target.FunctionAddrs)) { + return fmt.Errorf("unknown function specified by element") + } + addr := uint32(target.FunctionAddrs[elm]) tableInst.Table[i+offset] = &addr } } } + if len(target.TableAddrs) > 1 { + return fmt.Errorf("multiple tables not supported") + } return nil } @@ -338,15 +399,28 @@ func (s *Store) buildExportInstances(module *Module, target *ModuleInstance) err target.Exports = make(map[string]*ExportInstance, len(module.ExportSection)) for name, exp := range module.ExportSection { var addr int + index := int(exp.Desc.Index) switch exp.Desc.Kind { case ExportKindFunction: - addr = target.FunctionAddrs[exp.Desc.Index] + if index >= 
len(target.FunctionAddrs) { + return fmt.Errorf("unknown function for export") + } + addr = target.FunctionAddrs[index] case ExportKindGlobal: + if index >= len(target.GlobalsAddrs) { + return fmt.Errorf("unknown global for export") + } addr = target.GlobalsAddrs[exp.Desc.Index] case ExportKindMemory: - addr = target.MemoryAddrs[addr] + if index >= len(target.MemoryAddrs) { + return fmt.Errorf("unknown memory for export") + } + addr = target.MemoryAddrs[exp.Desc.Index] case ExportKindTable: - addr = target.TableAddrs[addr] + if index >= len(target.TableAddrs) { + return fmt.Errorf("unknown memory for export") + } + addr = target.TableAddrs[exp.Desc.Index] } target.Exports[name] = &ExportInstance{ Kind: exp.Desc.Kind, @@ -356,126 +430,906 @@ func (s *Store) buildExportInstances(module *Module, target *ModuleInstance) err return nil } +type valueTypeStack struct { + stack []ValueType + stackLimits []int +} + +const ( + valueTypeUnknown = ValueType(0xFE) +) + +func (s *valueTypeStack) pop() (ValueType, error) { + limit := 0 + if len(s.stackLimits) > 0 { + limit = s.stackLimits[len(s.stackLimits)-1] + } + if len(s.stack) <= limit { + return 0, fmt.Errorf("invalid operation: trying to pop at %d with limit %d", + len(s.stack), limit) + } else if len(s.stack) == limit+1 && s.stack[limit] == valueTypeUnknown { + return valueTypeUnknown, nil + } else { + ret := s.stack[len(s.stack)-1] + s.stack = s.stack[:len(s.stack)-1] + return ret, nil + } +} + +func (s *valueTypeStack) popAndVerifyType(expected ValueType) error { + actual, err := s.pop() + if err != nil { + return err + } + if actual != expected && actual != valueTypeUnknown && expected != valueTypeUnknown { + return fmt.Errorf("type mismatch") + } + return nil +} + +func (s *valueTypeStack) push(v ValueType) { + s.stack = append(s.stack, v) +} + +func (s *valueTypeStack) unreachable() { + s.resetAtStackLimit() + s.stack = append(s.stack, valueTypeUnknown) +} + +func (s *valueTypeStack) resetAtStackLimit() { + if 
len(s.stackLimits) != 0 { + s.stack = s.stack[:s.stackLimits[len(s.stackLimits)-1]] + } else { + s.stack = []ValueType{} + } +} + +func (s *valueTypeStack) popStackLimit() { + if len(s.stackLimits) != 0 { + s.stackLimits = s.stackLimits[:len(s.stackLimits)-1] + } +} + +func (s *valueTypeStack) pushStackLimit() { + s.stackLimits = append(s.stackLimits, len(s.stack)) +} + +func (s *valueTypeStack) popResults(expResults []ValueType, checkAboveLimit bool) error { + limit := 0 + if len(s.stackLimits) > 0 { + limit = s.stackLimits[len(s.stackLimits)-1] + } + for _, exp := range expResults { + if err := s.popAndVerifyType(exp); err != nil { + return err + } + } + if checkAboveLimit { + if !(limit == len(s.stack) || (limit+1 == len(s.stack) && s.stack[limit] == valueTypeUnknown)) { + return fmt.Errorf("leftovers found in the stack") + } + } + return nil +} + +func (s *valueTypeStack) String() string { + var typeStrs, limits []string + for _, v := range s.stack { + var str string + if v == valueTypeUnknown { + str = "unknown" + } else if v == ValueTypeI32 { + str = "i32" + } else if v == ValueTypeI64 { + str = "i64" + } else if v == ValueTypeF32 { + str = "f32" + } else if v == ValueTypeF64 { + str = "f64" + } + typeStrs = append(typeStrs, str) + } + for _, d := range s.stackLimits { + limits = append(limits, fmt.Sprintf("%d", d)) + } + return fmt.Sprintf("{stack: [%s], limits: [%s]}", + strings.Join(typeStrs, ", "), strings.Join(limits, ",")) +} + type BlockType = FunctionType -func parseBlocks(module *Module, body []byte) (map[uint64]*NativeFunctionBlock, error) { - ret := map[uint64]*NativeFunctionBlock{} - stack := make([]*NativeFunctionBlock, 0) - for pc := uint64(0); pc < uint64(len(body)); pc++ { - rawOc := body[pc] +func analyzeFunction( + module *Module, f *NativeFunction, + functionDeclarations []uint32, + globalDeclarations []*GlobalType, + memoryDeclarations []*MemoryType, + tableDeclarations []*TableType, +) error { + labelStack := []*NativeFunctionBlock{ + 
{BlockType: f.Signature, StartAt: math.MaxUint64}, + } + valueTypeStack := &valueTypeStack{} + for pc := uint64(0); pc < uint64(len(f.Body)); pc++ { + rawOc := f.Body[pc] if 0x28 <= rawOc && rawOc <= 0x3e { // memory load,store + if len(memoryDeclarations) == 0 { + return fmt.Errorf("unknown memory access") + } pc++ - // align - _, num, err := leb128.DecodeUint32(bytes.NewBuffer(body[pc:])) + align, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) if err != nil { - return nil, fmt.Errorf("read memory align: %v", err) + return fmt.Errorf("read memory align: %v", err) + } + switch OptCode(rawOc) { + case OptCodeI32Load: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OptCodeF32Load: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeF32) + case OptCodeI32Store: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeF32Store: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI64Load: + if 1<<align > 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OptCodeF64Load: + if 1<<align > 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + 
valueTypeStack.push(ValueTypeF64) + case OptCodeI64Store: + if 1<<align > 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeF64Store: + if 1<<align > 64/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI32Load8s: + if 1<<align > 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32Load8u: + if 1<<align > 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64Load8s, OptCodeI64Load8u: + if 1<<align > 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI32Store8: + if 1<<align > 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI64Store8: + if 1<<align > 1 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI32Load16s, OptCodeI32Load16u: + if 1<<align > 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + 
valueTypeStack.push(ValueTypeI32) + case OptCodeI64Load16s, OptCodeI64Load16u: + if 1<<align > 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI32Store16: + if 1<<align > 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI64Store16: + if 1<<align > 16/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + case OptCodeI64Load32s, OptCodeI64Load32u: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI64Store32: + if 1<<align > 32/8 { + return fmt.Errorf("invalid memory alignment") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return err + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } } pc += num // offset - _, num, err = leb128.DecodeUint32(bytes.NewBuffer(body[pc:])) + _, num, err = leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) if err != nil { - return nil, fmt.Errorf("read memory offset: %v", err) + return fmt.Errorf("read memory offset: %v", err) + } + pc += num - 1 + } else if 0x3f <= rawOc && rawOc <= 0x40 { // memory grow,size + if len(memoryDeclarations) == 0 { + return fmt.Errorf("unknown memory access") + } + pc++ + val, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) + if err != nil { + return fmt.Errorf("read immediate: %v", err) + } + if val != 0 || num != 1 { + return fmt.Errorf("memory instruction reserved bytes not 
zero with 1 byte") + } + switch OptCode(rawOc) { + case OptCodeMemoryGrow: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return err + } + valueTypeStack.push(ValueTypeI32) + case OptCodeMemorySize: + valueTypeStack.push(ValueTypeI32) } pc += num - 1 - continue } else if 0x41 <= rawOc && rawOc <= 0x44 { // const instructions pc++ switch OptCode(rawOc) { case OptCodeI32Const: - _, num, err := leb128.DecodeInt32(bytes.NewBuffer(body[pc:])) + _, num, err := leb128.DecodeInt32(bytes.NewBuffer(f.Body[pc:])) if err != nil { - return nil, fmt.Errorf("read i32 immediate: %s", err) + return fmt.Errorf("read i32 immediate: %s", err) } pc += num - 1 + valueTypeStack.push(ValueTypeI32) case OptCodeI64Const: - _, num, err := leb128.DecodeInt64(bytes.NewBuffer(body[pc:])) + _, num, err := leb128.DecodeInt64(bytes.NewBuffer(f.Body[pc:])) if err != nil { - return nil, fmt.Errorf("read i64 immediate: %v", err) + return fmt.Errorf("read i64 immediate: %v", err) } + valueTypeStack.push(ValueTypeI64) pc += num - 1 case OptCodeF32Const: + valueTypeStack.push(ValueTypeF32) pc += 3 case OptCodeF64Const: + valueTypeStack.push(ValueTypeF64) pc += 7 } - continue - } else if (0x3f <= rawOc && rawOc <= 0x40) || // memory grow,size - (0x20 <= rawOc && rawOc <= 0x24) || // variable instructions - (0x0c <= rawOc && rawOc <= 0x0d) || // br,br_if instructions - (0x10 <= rawOc && rawOc <= 0x11) { // call,call_indirect + } else if 0x20 <= rawOc && rawOc <= 0x24 { // variable instructions pc++ - val, num, err := leb128.DecodeUint32(bytes.NewBuffer(body[pc:])) + index, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) if err != nil { - return nil, fmt.Errorf("read immediate: %v", err) - } - if (rawOc == 0x3f || rawOc == 0x40) && (val != 0 || num != 1) { // memory grow,size - return nil, fmt.Errorf("memory instruction reserved bytes not zero with 1 byte") + return fmt.Errorf("read immediate: %v", err) } pc += num - 1 - if rawOc == 0x11 { // if call_indirect - pc++ 
- if body[pc] != 0x00 { - return nil, fmt.Errorf("call_indirect reserved bytes not zero but got %d", body[pc]) - + switch OptCode(rawOc) { + case OptCodeLocalGet: + inputLen := uint32(len(f.Signature.InputTypes)) + if l := f.NumLocals + inputLen; index >= l { + return fmt.Errorf("invalid local index for local.get %d >= %d(=len(locals)+len(parameters))", index, l) + } + if index < inputLen { + valueTypeStack.push(f.Signature.InputTypes[index]) + } else { + valueTypeStack.push(f.LocalTypes[index-inputLen]) + } + case OptCodeLocalSet: + inputLen := uint32(len(f.Signature.InputTypes)) + if l := f.NumLocals + inputLen; index >= l { + return fmt.Errorf("invalid local index for local.set %d >= %d(=len(locals)+len(parameters))", index, l) + } + var expType ValueType + if index < inputLen { + expType = f.Signature.InputTypes[index] + } else { + expType = f.LocalTypes[index-inputLen] + } + if err := valueTypeStack.popAndVerifyType(expType); err != nil { + return err + } + case OptCodeLocalTee: + inputLen := uint32(len(f.Signature.InputTypes)) + if l := f.NumLocals + inputLen; index >= l { + return fmt.Errorf("invalid local index for local.tee %d >= %d(=len(locals)+len(parameters))", index, l) + } + var expType ValueType + if index < inputLen { + expType = f.Signature.InputTypes[index] + } else { + expType = f.LocalTypes[index-inputLen] + } + if err := valueTypeStack.popAndVerifyType(expType); err != nil { + return err + } + valueTypeStack.push(expType) + case OptCodeGlobalGet: + if index >= uint32(len(globalDeclarations)) { + return fmt.Errorf("invalid global index") + } + valueTypeStack.push(globalDeclarations[index].ValType) + case OptCodeGlobalSet: + if index >= uint32(len(globalDeclarations)) { + return fmt.Errorf("invalid global index") + } else if !globalDeclarations[index].Mutable { + return fmt.Errorf("globa.set on immutable global type") + } else if err := valueTypeStack.popAndVerifyType( + globalDeclarations[index].ValType); err != nil { + return err } } - continue 
+ } else if rawOc == 0x0c { // br + pc++ + index, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) + if err != nil { + return fmt.Errorf("read immediate: %v", err) + } else if int(index) >= len(labelStack) { + return fmt.Errorf("invalid br operation: index out of range") + } + pc += num - 1 + // Check type soundness. + target := labelStack[len(labelStack)-int(index)-1] + targetResultType := target.BlockType.ReturnTypes + if target.IsLoop { + // Loop operation doesn't require results since the continuation is + // the beginning of the loop. + targetResultType = []ValueType{} + } + if err := valueTypeStack.popResults(targetResultType, false); err != nil { + return fmt.Errorf("type mismatch on the br operation: %v", err) + } + // br instruction is stack-polymorphic. + valueTypeStack.unreachable() + } else if rawOc == 0x0d { // br_if + pc++ + index, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) + if err != nil { + return fmt.Errorf("read immediate: %v", err) + } else if int(index) >= len(labelStack) { + return fmt.Errorf( + "invalid ln param given for br_if: index=%d with %d for the current lable stack length", + index, len(labelStack)) + } + pc += num - 1 + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the required operand for br_if") + } + // Check type soundness. + target := labelStack[len(labelStack)-int(index)-1] + targetResultType := target.BlockType.ReturnTypes + if target.IsLoop { + // Loop operation doesn't require results since the continuation is + // the beginning of the loop. 
+ targetResultType = []ValueType{} + } + if err := valueTypeStack.popResults(targetResultType, false); err != nil { + return fmt.Errorf("type mismatch on the br_if operation: %v", err) + } + // Push back the result + for _, t := range targetResultType { + valueTypeStack.push(t) + } } else if rawOc == 0x0e { // br_table pc++ - r := bytes.NewBuffer(body[pc:]) + r := bytes.NewBuffer(f.Body[pc:]) nl, num, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("read immediate: %w", err) + return fmt.Errorf("read immediate: %w", err) } + list := make([]uint32, nl) for i := uint32(0); i < nl; i++ { - _, n, err := leb128.DecodeUint32(r) + l, n, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("read immediate: %w", err) + return fmt.Errorf("read immediate: %w", err) } num += n + list[i] = l } - - _, n, err := leb128.DecodeUint32(r) + ln, n, err := leb128.DecodeUint32(r) if err != nil { - return nil, fmt.Errorf("read immediate: %w", err) + return fmt.Errorf("read immediate: %w", err) + } else if int(ln) >= len(labelStack) { + return fmt.Errorf( + "invalid ln param given for br_table: ln=%d with %d for the current lable stack length", + ln, len(labelStack)) } pc += n + num - 1 - continue - } - - switch OptCode(rawOc) { - case OptCodeBlock, OptCodeIf, OptCodeLoop: - bt, num, err := readBlockType(module, bytes.NewBuffer(body[pc+1:])) - if err != nil { - return nil, fmt.Errorf("read block: %w", err) + // Check type soundness. + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the required operand for br_table") } - stack = append(stack, &NativeFunctionBlock{ + lnLabel := labelStack[len(labelStack)-1-int(ln)] + expType := lnLabel.BlockType.ReturnTypes + if lnLabel.IsLoop { + // Loop operation doesn't require results since the continuation is + // the beginning of the loop. 
+ expType = []ValueType{} + } + for _, l := range list { + if int(l) >= len(labelStack) { + return fmt.Errorf("invalid l param given for br_table") + } + label := labelStack[len(labelStack)-1-int(l)] + expType2 := label.BlockType.ReturnTypes + if label.IsLoop { + // Loop operation doesn't require results since the continuation is + // the beginning of the loop. + expType2 = []ValueType{} + } + if len(expType) != len(expType2) { + return fmt.Errorf("incosistent block type length for br_table at %d; %v (ln=%d) != %v (l=%d)", l, expType, ln, expType2, l) + } + for i := range expType { + if expType[i] != expType2[i] { + return fmt.Errorf("incosistent block type for br_table at %d", l) + } + } + } + if err := valueTypeStack.popResults(expType, false); err != nil { + return fmt.Errorf("type mismatch on the br_table operation: %v", err) + } + // br_table instruction is stack-polymorphic. + valueTypeStack.unreachable() + } else if rawOc == 0x10 { // call + pc++ + index, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) + if err != nil { + return fmt.Errorf("read immediate: %v", err) + } + pc += num - 1 + if int(index) >= len(functionDeclarations) { + return fmt.Errorf("invalid function index") + } + funcType := module.TypeSection[functionDeclarations[index]] + for i := 0; i < len(funcType.InputTypes); i++ { + if err := valueTypeStack.popAndVerifyType(funcType.InputTypes[len(funcType.InputTypes)-1-i]); err != nil { + return fmt.Errorf("type mismatch on call operation input type") + } + } + for _, exp := range funcType.ReturnTypes { + valueTypeStack.push(exp) + } + } else if rawOc == 0x11 { // call_indirect + pc++ + typeIndex, num, err := leb128.DecodeUint32(bytes.NewBuffer(f.Body[pc:])) + if err != nil { + return fmt.Errorf("read immediate: %v", err) + } + pc += num - 1 + pc++ + if f.Body[pc] != 0x00 { + return fmt.Errorf("call_indirect reserved bytes not zero but got %d", f.Body[pc]) + } + if len(tableDeclarations) == 0 { + return fmt.Errorf("table not given 
while having call_indirect") + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the in table index's type for call_indirect") + } + if int(typeIndex) >= len(module.TypeSection) { + return fmt.Errorf("invalid type index at call_indirect: %d", typeIndex) + } + funcType := module.TypeSection[typeIndex] + for i := 0; i < len(funcType.InputTypes); i++ { + if err := valueTypeStack.popAndVerifyType(funcType.InputTypes[len(funcType.InputTypes)-1-i]); err != nil { + return fmt.Errorf("type mismatch on call_indirect operation input type") + } + } + for _, exp := range funcType.ReturnTypes { + valueTypeStack.push(exp) + } + } else if 0x45 <= rawOc && rawOc <= 0xbf { // numeric instructions + switch OptCode(rawOc) { + case OptCodeI32eqz: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the operand for i32.eqz: %v", err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32eq, OptCodeI32ne, OptCodeI32lts, + OptCodeI32ltu, OptCodeI32gts, OptCodeI32gtu, OptCodeI32les, + OptCodeI32leu, OptCodeI32ges, OptCodeI32geu: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the 1st i32 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the 2nd i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64eqz: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the operand for i64.eqz: %v", err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64eq, OptCodeI64ne, OptCodeI64lts, + OptCodeI64ltu, OptCodeI64gts, OptCodeI64gtu, + OptCodeI64les, OptCodeI64leu, OptCodeI64ges, OptCodeI64geu: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the 1st i64 operand for 0x%x: %v", rawOc, err) + } + if 
err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the 2nd i64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeF32eq, OptCodeF32ne, OptCodeF32lt, OptCodeF32gt, OptCodeF32le, OptCodeF32ge: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the 1st f32 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the 2nd f32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeF64eq, OptCodeF64ne, OptCodeF64lt, OptCodeF64gt, OptCodeF64le, OptCodeF64ge: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the 1st f64 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the 2nd f64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32clz, OptCodeI32ctz, OptCodeI32popcnt: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32add, OptCodeI32sub, OptCodeI32mul, OptCodeI32divs, + OptCodeI32divu, OptCodeI32rems, OptCodeI32remu, OptCodeI32and, + OptCodeI32or, OptCodeI32xor, OptCodeI32shl, OptCodeI32shrs, + OptCodeI32shru, OptCodeI32rotl, OptCodeI32rotr: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the 1st i32 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the 2nd i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64clz, OptCodeI64ctz, OptCodeI64popcnt: + if err := 
valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the i64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI64add, OptCodeI64sub, OptCodeI64mul, OptCodeI64divs, + OptCodeI64divu, OptCodeI64rems, OptCodeI64remu, OptCodeI64and, + OptCodeI64or, OptCodeI64xor, OptCodeI64shl, OptCodeI64shrs, + OptCodeI64shru, OptCodeI64rotl, OptCodeI64rotr: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the 1st i64 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the 2nd i64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeF32abs, OptCodeF32neg, OptCodeF32ceil, + OptCodeF32floor, OptCodeF32trunc, OptCodeF32nearest, + OptCodeF32sqrt: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the 1st f32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF32add, OptCodeF32sub, OptCodeF32mul, + OptCodeF32div, OptCodeF32min, OptCodeF32max, + OptCodeF32copysign: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the 1st f32 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the 2nd f32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF64abs, OptCodeF64neg, OptCodeF64ceil, + OptCodeF64floor, OptCodeF64trunc, OptCodeF64nearest, + OptCodeF64sqrt: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the 1st f64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF64) + case OptCodeF64add, OptCodeF64sub, OptCodeF64mul, + OptCodeF64div, OptCodeF64min, OptCodeF64max, + OptCodeF64copysign: + if err := 
valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the 1st f64 operand for 0x%x: %v", rawOc, err) + } + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the 2nd f64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF64) + case OptCodeI32wrapI64: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the operand for i32.wrap_i64: %v", err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32truncf32s, OptCodeI32truncf32u: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the f32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI32truncf64s, OptCodeI32truncf64u: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the f64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64Extendi32s, OptCodeI64Extendi32u: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI64TruncF32s, OptCodeI64TruncF32u: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the f32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeI64Truncf64s, OptCodeI64Truncf64u: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the f64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeF32Converti32s, OptCodeF32Converti32u: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF32Converti64s, 
OptCodeF32Converti64u: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the i64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF32Demotef64: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the operand for f32.demote_f64: %v", err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF64Converti32s, OptCodeF64Converti32u: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the i32 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF64) + case OptCodeF64Converti64s, OptCodeF64Converti64u: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the i64 operand for 0x%x: %v", rawOc, err) + } + valueTypeStack.push(ValueTypeF64) + case OptCodeF64Promotef32: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the operand for f64.promote_f32: %v", err) + } + valueTypeStack.push(ValueTypeF64) + case OptCodeI32reinterpretf32: + if err := valueTypeStack.popAndVerifyType(ValueTypeF32); err != nil { + return fmt.Errorf("cannot pop the operand for i32.reinterpret_f32: %v", err) + } + valueTypeStack.push(ValueTypeI32) + case OptCodeI64reinterpretf64: + if err := valueTypeStack.popAndVerifyType(ValueTypeF64); err != nil { + return fmt.Errorf("cannot pop the operand for i64.reinterpret_f64: %v", err) + } + valueTypeStack.push(ValueTypeI64) + case OptCodeF32reinterpreti32: + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the operand for f32.reinterpret_i32: %v", err) + } + valueTypeStack.push(ValueTypeF32) + case OptCodeF64reinterpreti64: + if err := valueTypeStack.popAndVerifyType(ValueTypeI64); err != nil { + return fmt.Errorf("cannot pop the operand for f64.reinterpret_i64: %v", err) + } + 
valueTypeStack.push(ValueTypeF64) + default: + return fmt.Errorf("invalid numeric instruction 0x%x", rawOc) + } + } else if rawOc == 0x02 { // Block + bt, num, err := readBlockType(module, bytes.NewBuffer(f.Body[pc+1:])) + if err != nil { + return fmt.Errorf("read block: %w", err) + } + labelStack = append(labelStack, &NativeFunctionBlock{ StartAt: pc, BlockType: bt, BlockTypeBytes: num, }) + valueTypeStack.pushStackLimit() pc += num - case OptCodeElse: - stack[len(stack)-1].ElseAt = pc - case OptCodeEnd: - bl := stack[len(stack)-1] - stack = stack[:len(stack)-1] + } else if rawOc == 0x03 { // Loop + bt, num, err := readBlockType(module, bytes.NewBuffer(f.Body[pc+1:])) + if err != nil { + return fmt.Errorf("read block: %w", err) + } + labelStack = append(labelStack, &NativeFunctionBlock{ + StartAt: pc, + BlockType: bt, + BlockTypeBytes: num, + IsLoop: true, + }) + valueTypeStack.pushStackLimit() + pc += num + } else if rawOc == 0x04 { // If + bt, num, err := readBlockType(module, bytes.NewBuffer(f.Body[pc+1:])) + if err != nil { + return fmt.Errorf("read block: %w", err) + } + labelStack = append(labelStack, &NativeFunctionBlock{ + StartAt: pc, + BlockType: bt, + BlockTypeBytes: num, + IsIf: true, + }) + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("cannot pop the operand for 'if': %v", err) + } + valueTypeStack.pushStackLimit() + pc += num + } else if rawOc == 0x05 { // Else + bl := labelStack[len(labelStack)-1] + bl.ElseAt = pc + // Check the type soundness of the instructions *before*  entering this Eles Op. + if err := valueTypeStack.popResults(bl.BlockType.ReturnTypes, true); err != nil { + return fmt.Errorf("invalid instruction results in then instructions") + } + // Before entring instructions inside else, we pop all the values pushed by + // then block. 
+ valueTypeStack.resetAtStackLimit() + } else if rawOc == 0x0b { // End + bl := labelStack[len(labelStack)-1] bl.EndAt = pc - ret[bl.StartAt] = bl - if bl.ElseAt <= bl.StartAt { + labelStack = labelStack[:len(labelStack)-1] + f.Blocks[bl.StartAt] = bl + if bl.IsIf && bl.ElseAt <= bl.StartAt { + if len(bl.BlockType.ReturnTypes) > 0 { + return fmt.Errorf("type mismatch between then and else blocks.") + } // To handle if block without else properly, - // we set ElseAt to EndAt so we can just skip else. + // we set ElseAt to EndAt-1 so we can just skip else. bl.ElseAt = bl.EndAt - 1 } + // Check type soundness. + if err := valueTypeStack.popResults(bl.BlockType.ReturnTypes, true); err != nil { + return fmt.Errorf("invalid instruction results at end instruction; expected %v: %v", bl.BlockType.ReturnTypes, err) + } + // Put the result types at the end after resetting at the stack limit + // since we might have Any type between the limit and the current top. + valueTypeStack.resetAtStackLimit() + for _, exp := range bl.BlockType.ReturnTypes { + valueTypeStack.push(exp) + } + // We exit if/loop/block, so reset the constraints on the stack manipulation + // on values previously pushed by outer blocks. + valueTypeStack.popStackLimit() + } else if rawOc == 0x0f { // Return + expTypes := f.Signature.ReturnTypes + for i := 0; i < len(expTypes); i++ { + if err := valueTypeStack.popAndVerifyType(expTypes[len(expTypes)-1-i]); err != nil { + return fmt.Errorf("return type mismatch on return: %v; want %v", err, expTypes) + } + } + // return instruction is stack-polymorphic. 
+ valueTypeStack.unreachable() + } else if rawOc == 0x1a { // Drop + _, err := valueTypeStack.pop() + if err != nil { + return fmt.Errorf("invalid drop: %v", err) + } + } else if rawOc == 0x1b { // Select + if err := valueTypeStack.popAndVerifyType(ValueTypeI32); err != nil { + return fmt.Errorf("type mismatch on 3rd select operand: %v", err) + } + v1, err := valueTypeStack.pop() + if err != nil { + return fmt.Errorf("invalid select: %v", err) + } + v2, err := valueTypeStack.pop() + if err != nil { + return fmt.Errorf("invalid select: %v", err) + } + if v1 != v2 && v1 != valueTypeUnknown && v2 != valueTypeUnknown { + return fmt.Errorf("type mismatch on 1st and 2nd select operands") + } + if v1 == valueTypeUnknown { + valueTypeStack.push(v2) + } else { + valueTypeStack.push(v1) + } + } else if rawOc == 0x00 { // unreachable + // unreachable instruction is stack-polymorphic. + valueTypeStack.unreachable() + } else if rawOc == 0x01 { // Nop + } else { + return fmt.Errorf("invalid instruction 0x%x", rawOc) } } - if len(stack) > 0 { - return nil, fmt.Errorf("ill-nested block exists") + if len(labelStack) > 0 { + return fmt.Errorf("ill-nested block exists") } - return ret, nil + return nil } func readBlockType(module *Module, r io.Reader) (*BlockType, uint64, error) { diff --git a/wasm/type.go b/wasm/type.go index 9e687c7a..a7ef15f1 100644 --- a/wasm/type.go +++ b/wasm/type.go @@ -39,6 +39,8 @@ func readFunctionType(r io.Reader) (*FunctionType, error) { s, _, err = leb128.DecodeUint32(r) if err != nil { return nil, fmt.Errorf("get the size of output value types: %w", err) + } else if s > 1 { + return nil, fmt.Errorf("multi value results not supported") } op, err := readValueTypes(r, s) @@ -116,7 +118,21 @@ func readTableType(r io.Reader) (*TableType, error) { type MemoryType = LimitsType func readMemoryType(r io.Reader) (*MemoryType, error) { - return readLimitsType(r) + ret, err := readLimitsType(r) + if err != nil { + return nil, err + } + if ret.Min > uint32(PageSize) 
{ + return nil, fmt.Errorf("memory min must be at most 65536 pages (4GiB)") + } + if ret.Max != nil { + if *ret.Max < ret.Min { + return nil, fmt.Errorf("memory size minimum must not be greater than maximum") + } else if *ret.Max > uint32(PageSize) { + return nil, fmt.Errorf("memory max must be at most 65536 pages (4GiB)") + } + } + return ret, nil } type GlobalType struct { diff --git a/wasm/type_test.go b/wasm/type_test.go deleted file mode 100644 index 6f31dd46..00000000 --- a/wasm/type_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package wasm - -import ( - "bytes" - "errors" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestReadFunctionType(t *testing.T) { - t.Run("ng", func(t *testing.T) { - buf := []byte{0x00} - _, err := readFunctionType(bytes.NewBuffer(buf)) - assert.True(t, errors.Is(err, ErrInvalidByte)) - t.Log(err) - }) - - for i, c := range []struct { - bytes []byte - exp *FunctionType - }{ - { - bytes: []byte{0x60, 0x0, 0x0}, - exp: &FunctionType{ - InputTypes: []ValueType{}, - ReturnTypes: []ValueType{}, - }, - }, - { - bytes: []byte{0x60, 0x2, 0x7f, 0x7e, 0x0}, - exp: &FunctionType{ - InputTypes: []ValueType{ValueTypeI32, ValueTypeI64}, - ReturnTypes: []ValueType{}, - }, - }, - { - bytes: []byte{0x60, 0x1, 0x7e, 0x2, 0x7f, 0x7e}, - exp: &FunctionType{ - InputTypes: []ValueType{ValueTypeI64}, - ReturnTypes: []ValueType{ValueTypeI32, ValueTypeI64}, - }, - }, - { - bytes: []byte{0x60, 0x0, 0x2, 0x7f, 0x7e}, - exp: &FunctionType{ - InputTypes: []ValueType{}, - ReturnTypes: []ValueType{ValueTypeI32, ValueTypeI64}, - }, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readFunctionType(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} - -func TestReadLimitsType(t *testing.T) { - for i, c := range []struct { - bytes []byte - exp *LimitsType - }{ - {bytes: []byte{0x00, 0xa}, exp: &LimitsType{Min: 10}}, - {bytes: 
[]byte{0x01, 0xa, 0xa}, exp: &LimitsType{Min: 10, Max: uint32Ptr(10)}}, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readLimitsType(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} - -func uint32Ptr(in uint32) *uint32 { - return &in -} - -func TestReadTableType(t *testing.T) { - t.Run("ng", func(t *testing.T) { - buf := []byte{0x00} - _, err := readTableType(bytes.NewBuffer(buf)) - require.True(t, errors.Is(err, ErrInvalidByte)) - t.Log(err) - }) - - for i, c := range []struct { - bytes []byte - exp *TableType - }{ - { - bytes: []byte{0x70, 0x00, 0xa}, - exp: &TableType{ - ElemType: 0x70, - Limit: &LimitsType{Min: 10}, - }, - }, - { - bytes: []byte{0x70, 0x01, 0x01, 0xa}, - exp: &TableType{ - ElemType: 0x70, - Limit: &LimitsType{Min: 1, Max: uint32Ptr(10)}, - }, - }, - } { - c := c - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readTableType(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} - -func TestReadMemoryType(t *testing.T) { - for i, c := range []struct { - bytes []byte - exp *MemoryType - }{ - {bytes: []byte{0x00, 0xa}, exp: &MemoryType{Min: 10}}, - {bytes: []byte{0x01, 0xa, 0xa}, exp: &MemoryType{Min: 10, Max: uint32Ptr(10)}}, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := readMemoryType(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} - -func TestReadGlobalType(t *testing.T) { - t.Run("ng", func(t *testing.T) { - buf := []byte{0x7e, 0x3} - _, err := readGlobalType(bytes.NewBuffer(buf)) - require.True(t, errors.Is(err, ErrInvalidByte)) - t.Log(err) - }) - - for i, c := range []struct { - bytes []byte - exp *GlobalType - }{ - {bytes: []byte{0x7e, 0x00}, exp: &GlobalType{ValType: ValueTypeI64, Mutable: false}}, - {bytes: []byte{0x7e, 0x01}, exp: &GlobalType{ValType: ValueTypeI64, Mutable: true}}, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - 
actual, err := readGlobalType(bytes.NewBuffer(c.bytes)) - require.NoError(t, err) - assert.Equal(t, c.exp, actual) - }) - } -} diff --git a/wasm/value.go b/wasm/value.go index 318ab8ba..a1ec8916 100644 --- a/wasm/value.go +++ b/wasm/value.go @@ -3,6 +3,7 @@ package wasm import ( "fmt" "io" + "unicode/utf8" "github.com/mathetake/gasm/wasm/leb128" ) @@ -46,6 +47,10 @@ func readNameValue(r io.Reader) (string, error) { return "", fmt.Errorf("read bytes of name: %v", err) } + if !utf8.Valid(buf) { + return "", fmt.Errorf("name must be valid as utf8") + } + return string(buf), nil } diff --git a/wasm/vm.go b/wasm/vm.go index 667d3071..281ac0d0 100644 --- a/wasm/vm.go +++ b/wasm/vm.go @@ -61,7 +61,7 @@ func (vm *VirtualMachine) ExecExportedFunction(moduleName, funcName string, args exp, ok := m.Exports[funcName] if !ok { - return nil, nil, fmt.Errorf("exported func of name '%s' not found in '%s'", funcName, moduleName) + return nil, nil, fmt.Errorf("exported function '%s' not found in '%s'", funcName, moduleName) } if exp.Kind != ExportKindFunction { diff --git a/wasm/vm_func.go b/wasm/vm_func.go index b27a50be..55b66d0b 100644 --- a/wasm/vm_func.go +++ b/wasm/vm_func.go @@ -15,7 +15,8 @@ type ( } NativeFunction struct { Signature *FunctionType - NumLocal uint32 + NumLocals uint32 + LocalTypes []ValueType Body []byte Blocks map[uint64]*NativeFunctionBlock ModuleInstance *ModuleInstance @@ -24,6 +25,8 @@ type ( StartAt, ElseAt, EndAt uint64 BlockType *FunctionType BlockTypeBytes uint64 + IsLoop bool // TODO: might not be necessary + IsIf bool // TODO: might not be necessary } ) @@ -83,7 +86,7 @@ func (h *HostFunction) Call(vm *VirtualMachine) { func (n *NativeFunction) Call(vm *VirtualMachine) { al := len(n.Signature.InputTypes) - locals := make([]uint64, n.NumLocal+uint32(al)) + locals := make([]uint64, n.NumLocals+uint32(al)) for i := 0; i < al; i++ { locals[al-1-i] = vm.OperandStack.Pop() } diff --git a/wasm/vm_global.go b/wasm/vm_global.go index 5155c711..b324fe50 
100644 --- a/wasm/vm_global.go +++ b/wasm/vm_global.go @@ -12,7 +12,6 @@ func setGlobal(vm *VirtualMachine) { vm.ActiveContext.PC++ index := vm.FetchUint32() addr := vm.ActiveContext.Function.ModuleInstance.GlobalsAddrs[index] - // TODO: Check mutatability. vm.Store.Globals[addr].Val = vm.OperandStack.Pop() vm.ActiveContext.PC++ }