From 0dc152d672b9df467645605b2834c28d723500e4 Mon Sep 17 00:00:00 2001 From: Edoardo Vacchi Date: Wed, 5 Apr 2023 02:38:49 +0200 Subject: [PATCH] wazeroir: migrate vector, table, branch and all other remaining ops to compact repr (#1334) Signed-off-by: Edoardo Vacchi Co-authored-by: Takeshi Yoneda --- internal/engine/compiler/compiler.go | 350 +-- .../engine/compiler/compiler_bench_test.go | 12 +- .../compiler_conditional_save_test.go | 12 +- .../compiler/compiler_controlflow_test.go | 230 +- .../compiler/compiler_conversion_test.go | 36 +- .../engine/compiler/compiler_global_test.go | 8 +- .../engine/compiler/compiler_memory_test.go | 62 +- .../engine/compiler/compiler_numeric_test.go | 172 +- .../engine/compiler/compiler_post1_0_test.go | 64 +- .../engine/compiler/compiler_stack_test.go | 58 +- internal/engine/compiler/compiler_test.go | 6 +- internal/engine/compiler/compiler_vec_test.go | 508 ++--- internal/engine/compiler/engine.go | 583 +++-- internal/engine/compiler/impl_amd64.go | 337 +-- internal/engine/compiler/impl_amd64_test.go | 28 +- internal/engine/compiler/impl_arm64.go | 291 +-- internal/engine/compiler/impl_arm64_test.go | 2 +- internal/engine/compiler/impl_vec_amd64.go | 415 ++-- .../engine/compiler/impl_vec_amd64_test.go | 39 +- internal/engine/compiler/impl_vec_arm64.go | 385 ++-- .../engine/compiler/impl_vec_arm64_test.go | 38 +- internal/engine/interpreter/interpreter.go | 323 +-- .../engine/interpreter/interpreter_test.go | 30 +- internal/wazeroir/compiler.go | 731 +++--- internal/wazeroir/compiler_test.go | 1068 ++++----- internal/wazeroir/format.go | 7 +- internal/wazeroir/operations.go | 2011 +++++++---------- internal/wazeroir/operations_test.go | 68 +- 28 files changed, 3465 insertions(+), 4409 deletions(-) diff --git a/internal/engine/compiler/compiler.go b/internal/engine/compiler/compiler.go index 8fc9d31c..17cc98ea 100644 --- a/internal/engine/compiler/compiler.go +++ b/internal/engine/compiler/compiler.go @@ -23,88 +23,88 @@ type 
compiler interface { compileGoDefinedHostFunction() error // compileLabel notify compilers of the beginning of a label. // Return true if the compiler decided to skip the entire label. - // See wazeroir.OperationLabel - compileLabel(o wazeroir.OperationLabel) (skipThisLabel bool) + // See wazeroir.NewOperationLabel + compileLabel(o *wazeroir.UnionOperation) (skipThisLabel bool) // compileUnreachable adds instruction to perform wazeroir.OperationUnreachable. compileUnreachable() error // compileSet adds instruction to perform wazeroir.OperationSet. - compileSet(o wazeroir.UnionOperation) error + compileSet(o *wazeroir.UnionOperation) error // compileGlobalGet adds instructions to perform wazeroir.OperationGlobalGet. - compileGlobalGet(o wazeroir.UnionOperation) error + compileGlobalGet(o *wazeroir.UnionOperation) error // compileGlobalSet adds instructions to perform wazeroir.OperationGlobalSet. - compileGlobalSet(o wazeroir.UnionOperation) error - // compileBr adds instructions to perform wazeroir.OperationBr. - compileBr(o wazeroir.OperationBr) error - // compileBrIf adds instructions to perform wazeroir.OperationBrIf. - compileBrIf(o wazeroir.OperationBrIf) error - // compileBrTable adds instructions to perform wazeroir.OperationBrTable. - compileBrTable(o wazeroir.OperationBrTable) error + compileGlobalSet(o *wazeroir.UnionOperation) error + // compileBr adds instructions to perform wazeroir.NewOperationBr. + compileBr(o *wazeroir.UnionOperation) error + // compileBrIf adds instructions to perform wazeroir.NewOperationBrIf. + compileBrIf(o *wazeroir.UnionOperation) error + // compileBrTable adds instructions to perform wazeroir.NewOperationBrTable. + compileBrTable(o *wazeroir.UnionOperation) error // compileCall adds instructions to perform wazeroir.OperationCall. - compileCall(o wazeroir.UnionOperation) error + compileCall(o *wazeroir.UnionOperation) error // compileCallIndirect adds instructions to perform wazeroir.OperationCallIndirect. 
- compileCallIndirect(o wazeroir.UnionOperation) error - // compileDrop adds instructions to perform wazeroir.OperationDrop. - compileDrop(o wazeroir.OperationDrop) error + compileCallIndirect(o *wazeroir.UnionOperation) error + // compileDrop adds instructions to perform wazeroir.NewOperationDrop. + compileDrop(o *wazeroir.UnionOperation) error // compileSelect adds instructions to perform wazeroir.OperationSelect. - compileSelect(o wazeroir.UnionOperation) error + compileSelect(o *wazeroir.UnionOperation) error // compilePick adds instructions to perform wazeroir.OperationPick. - compilePick(o wazeroir.UnionOperation) error + compilePick(o *wazeroir.UnionOperation) error // compileAdd adds instructions to perform wazeroir.OperationAdd. - compileAdd(o wazeroir.UnionOperation) error + compileAdd(o *wazeroir.UnionOperation) error // compileSub adds instructions to perform wazeroir.OperationSub. - compileSub(o wazeroir.UnionOperation) error + compileSub(o *wazeroir.UnionOperation) error // compileMul adds instructions to perform wazeroir.OperationMul. - compileMul(o wazeroir.UnionOperation) error + compileMul(o *wazeroir.UnionOperation) error // compileClz adds instructions to perform wazeroir.OperationClz. - compileClz(o wazeroir.UnionOperation) error + compileClz(o *wazeroir.UnionOperation) error // compileCtz adds instructions to perform wazeroir.OperationCtz. - compileCtz(o wazeroir.UnionOperation) error + compileCtz(o *wazeroir.UnionOperation) error // compilePopcnt adds instructions to perform wazeroir.OperationPopcnt. - compilePopcnt(o wazeroir.UnionOperation) error + compilePopcnt(o *wazeroir.UnionOperation) error // compileDiv adds instructions to perform wazeroir.OperationDiv. - compileDiv(o wazeroir.UnionOperation) error + compileDiv(o *wazeroir.UnionOperation) error // compileRem adds instructions to perform wazeroir.OperationRem. 
- compileRem(o wazeroir.UnionOperation) error + compileRem(o *wazeroir.UnionOperation) error // compileAnd adds instructions to perform wazeroir.OperationAnd. - compileAnd(o wazeroir.UnionOperation) error + compileAnd(o *wazeroir.UnionOperation) error // compileOr adds instructions to perform wazeroir.OperationOr. - compileOr(o wazeroir.UnionOperation) error + compileOr(o *wazeroir.UnionOperation) error // compileXor adds instructions to perform wazeroir.OperationXor. - compileXor(o wazeroir.UnionOperation) error + compileXor(o *wazeroir.UnionOperation) error // compileShl adds instructions to perform wazeroir.OperationShl. - compileShl(o wazeroir.UnionOperation) error + compileShl(o *wazeroir.UnionOperation) error // compileShr adds instructions to perform wazeroir.OperationShr. - compileShr(o wazeroir.UnionOperation) error + compileShr(o *wazeroir.UnionOperation) error // compileRotl adds instructions to perform wazeroir.OperationRotl. - compileRotl(o wazeroir.UnionOperation) error + compileRotl(o *wazeroir.UnionOperation) error // compileRotr adds instructions to perform wazeroir.OperationRotr. - compileRotr(o wazeroir.UnionOperation) error + compileRotr(o *wazeroir.UnionOperation) error // compileNeg adds instructions to perform wazeroir.OperationAbs. - compileAbs(o wazeroir.UnionOperation) error + compileAbs(o *wazeroir.UnionOperation) error // compileNeg adds instructions to perform wazeroir.OperationNeg. - compileNeg(o wazeroir.UnionOperation) error + compileNeg(o *wazeroir.UnionOperation) error // compileCeil adds instructions to perform wazeroir.OperationCeil. - compileCeil(o wazeroir.UnionOperation) error + compileCeil(o *wazeroir.UnionOperation) error // compileFloor adds instructions to perform wazeroir.OperationFloor. - compileFloor(o wazeroir.UnionOperation) error + compileFloor(o *wazeroir.UnionOperation) error // compileTrunc adds instructions to perform wazeroir.OperationTrunc. 
- compileTrunc(o wazeroir.UnionOperation) error + compileTrunc(o *wazeroir.UnionOperation) error // compileNearest adds instructions to perform wazeroir.OperationNearest. - compileNearest(o wazeroir.UnionOperation) error + compileNearest(o *wazeroir.UnionOperation) error // compileSqrt adds instructions perform wazeroir.OperationSqrt. - compileSqrt(o wazeroir.UnionOperation) error + compileSqrt(o *wazeroir.UnionOperation) error // compileMin adds instructions perform wazeroir.OperationMin. - compileMin(o wazeroir.UnionOperation) error + compileMin(o *wazeroir.UnionOperation) error // compileMax adds instructions perform wazeroir.OperationMax. - compileMax(o wazeroir.UnionOperation) error + compileMax(o *wazeroir.UnionOperation) error // compileCopysign adds instructions to perform wazeroir.OperationCopysign. - compileCopysign(o wazeroir.UnionOperation) error + compileCopysign(o *wazeroir.UnionOperation) error // compileI32WrapFromI64 adds instructions to perform wazeroir.OperationI32WrapFromI64. compileI32WrapFromI64() error - // compileITruncFromF adds instructions to perform wazeroir.OperationITruncFromF. - compileITruncFromF(o wazeroir.OperationITruncFromF) error - // compileFConvertFromI adds instructions to perform wazeroir.OperationFConvertFromI. - compileFConvertFromI(o wazeroir.OperationFConvertFromI) error + // compileITruncFromF adds instructions to perform wazeroir.NewOperationITruncFromF. + compileITruncFromF(o *wazeroir.UnionOperation) error + // compileFConvertFromI adds instructions to perform wazeroir.NewOperationFConvertFromI. + compileFConvertFromI(o *wazeroir.UnionOperation) error // compileF32DemoteFromF64 adds instructions to perform wazeroir.OperationF32DemoteFromF64. compileF32DemoteFromF64() error // compileF64PromoteFromF32 adds instructions to perform wazeroir.OperationF64PromoteFromF32. 
@@ -117,50 +117,50 @@ type compiler interface { compileF32ReinterpretFromI32() error // compileF64ReinterpretFromI64 adds instructions to perform wazeroir.OperationF64ReinterpretFromI64. compileF64ReinterpretFromI64() error - // compileExtend adds instructions to perform wazeroir.OperationExtend. - compileExtend(o wazeroir.OperationExtend) error + // compileExtend adds instructions to perform wazeroir.NewOperationExtend. + compileExtend(o *wazeroir.UnionOperation) error // compileEq adds instructions to perform wazeroir.OperationEq. - compileEq(o wazeroir.UnionOperation) error + compileEq(o *wazeroir.UnionOperation) error // compileEq adds instructions to perform wazeroir.OperationNe. - compileNe(o wazeroir.UnionOperation) error + compileNe(o *wazeroir.UnionOperation) error // compileEq adds instructions to perform wazeroir.OperationEqz. - compileEqz(o wazeroir.UnionOperation) error + compileEqz(o *wazeroir.UnionOperation) error // compileLt adds instructions to perform wazeroir.OperationLt. - compileLt(o wazeroir.UnionOperation) error + compileLt(o *wazeroir.UnionOperation) error // compileGt adds instructions to perform wazeroir.OperationGt. - compileGt(o wazeroir.UnionOperation) error + compileGt(o *wazeroir.UnionOperation) error // compileLe adds instructions to perform wazeroir.OperationLe. - compileLe(o wazeroir.UnionOperation) error + compileLe(o *wazeroir.UnionOperation) error // compileLe adds instructions to perform wazeroir.OperationGe. - compileGe(o wazeroir.UnionOperation) error + compileGe(o *wazeroir.UnionOperation) error // compileLoad adds instructions to perform wazeroir.OperationLoad. - compileLoad(o wazeroir.UnionOperation) error + compileLoad(o *wazeroir.UnionOperation) error // compileLoad8 adds instructions to perform wazeroir.OperationLoad8. - compileLoad8(o wazeroir.UnionOperation) error + compileLoad8(o *wazeroir.UnionOperation) error // compileLoad16 adds instructions to perform wazeroir.OperationLoad16. 
- compileLoad16(o wazeroir.UnionOperation) error + compileLoad16(o *wazeroir.UnionOperation) error // compileLoad32 adds instructions to perform wazeroir.OperationLoad32. - compileLoad32(o wazeroir.UnionOperation) error + compileLoad32(o *wazeroir.UnionOperation) error // compileStore adds instructions to perform wazeroir.OperationStore. - compileStore(o wazeroir.UnionOperation) error + compileStore(o *wazeroir.UnionOperation) error // compileStore8 adds instructions to perform wazeroir.OperationStore8. - compileStore8(o wazeroir.UnionOperation) error + compileStore8(o *wazeroir.UnionOperation) error // compileStore16 adds instructions to perform wazeroir.OperationStore16. - compileStore16(o wazeroir.UnionOperation) error + compileStore16(o *wazeroir.UnionOperation) error // compileStore32 adds instructions to perform wazeroir.OperationStore32. - compileStore32(o wazeroir.UnionOperation) error + compileStore32(o *wazeroir.UnionOperation) error // compileMemorySize adds instruction to perform wazeroir.OperationMemoryGrow. compileMemoryGrow() error // compileMemorySize adds instruction to perform wazeroir.OperationMemorySize. compileMemorySize() error // compileConstI32 adds instruction to perform wazeroir.NewOperationConstI32. - compileConstI32(o wazeroir.UnionOperation) error + compileConstI32(o *wazeroir.UnionOperation) error // compileConstI64 adds instruction to perform wazeroir.NewOperationConstI64. - compileConstI64(o wazeroir.UnionOperation) error + compileConstI64(o *wazeroir.UnionOperation) error // compileConstF32 adds instruction to perform wazeroir.NewOperationConstF32. - compileConstF32(o wazeroir.UnionOperation) error + compileConstF32(o *wazeroir.UnionOperation) error // compileConstF64 adds instruction to perform wazeroir.NewOperationConstF64. - compileConstF64(o wazeroir.UnionOperation) error + compileConstF64(o *wazeroir.UnionOperation) error // compileSignExtend32From8 adds instructions to perform wazeroir.OperationSignExtend32From8. 
compileSignExtend32From8() error // compileSignExtend32From16 adds instructions to perform wazeroir.OperationSignExtend32From16. @@ -171,134 +171,134 @@ type compiler interface { compileSignExtend64From16() error // compileSignExtend64From32 adds instructions to perform wazeroir.OperationSignExtend64From32. compileSignExtend64From32() error - // compileMemoryInit adds instructions to perform wazeroir.OperationMemoryInit. - compileMemoryInit(wazeroir.OperationMemoryInit) error - // compileDataDrop adds instructions to perform wazeroir.OperationDataDrop. - compileDataDrop(wazeroir.OperationDataDrop) error + // compileMemoryInit adds instructions to perform wazeroir.NewOperationMemoryInit. + compileMemoryInit(*wazeroir.UnionOperation) error + // compileDataDrop adds instructions to perform wazeroir.NewOperationDataDrop. + compileDataDrop(*wazeroir.UnionOperation) error // compileMemoryCopy adds instructions to perform wazeroir.OperationMemoryCopy. compileMemoryCopy() error // compileMemoryFill adds instructions to perform wazeroir.OperationMemoryFill. compileMemoryFill() error - // compileTableInit adds instructions to perform wazeroir.OperationTableInit. - compileTableInit(wazeroir.OperationTableInit) error - // compileTableCopy adds instructions to perform wazeroir.OperationTableCopy. - compileTableCopy(wazeroir.OperationTableCopy) error - // compileElemDrop adds instructions to perform wazeroir.OperationElemDrop. - compileElemDrop(wazeroir.OperationElemDrop) error - // compileRefFunc adds instructions to perform wazeroir.OperationRefFunc. - compileRefFunc(wazeroir.OperationRefFunc) error - // compileTableGet adds instructions to perform wazeroir.OperationTableGet. - compileTableGet(wazeroir.OperationTableGet) error - // compileTableSet adds instructions to perform wazeroir.OperationTableSet. - compileTableSet(wazeroir.OperationTableSet) error - // compileTableGrow adds instructions to perform wazeroir.OperationTableGrow. 
- compileTableGrow(wazeroir.OperationTableGrow) error - // compileTableSize adds instructions to perform wazeroir.OperationTableSize. - compileTableSize(wazeroir.OperationTableSize) error - // compileTableFill adds instructions to perform wazeroir.OperationTableFill. - compileTableFill(wazeroir.OperationTableFill) error - // compileV128Const adds instructions to perform wazeroir.OperationV128Const. - compileV128Const(wazeroir.OperationV128Const) error + // compileTableInit adds instructions to perform wazeroir.NewOperationTableInit. + compileTableInit(*wazeroir.UnionOperation) error + // compileTableCopy adds instructions to perform wazeroir.NewOperationTableCopy. + compileTableCopy(*wazeroir.UnionOperation) error + // compileElemDrop adds instructions to perform wazeroir.NewOperationElemDrop. + compileElemDrop(*wazeroir.UnionOperation) error + // compileRefFunc adds instructions to perform wazeroir.NewOperationRefFunc. + compileRefFunc(*wazeroir.UnionOperation) error + // compileTableGet adds instructions to perform wazeroir.NewOperationTableGet. + compileTableGet(*wazeroir.UnionOperation) error + // compileTableSet adds instructions to perform wazeroir.NewOperationTableSet. + compileTableSet(*wazeroir.UnionOperation) error + // compileTableGrow adds instructions to perform wazeroir.NewOperationTableGrow. + compileTableGrow(*wazeroir.UnionOperation) error + // compileTableSize adds instructions to perform wazeroir.NewOperationTableSize. + compileTableSize(*wazeroir.UnionOperation) error + // compileTableFill adds instructions to perform wazeroir.NewOperationTableFill. + compileTableFill(*wazeroir.UnionOperation) error + // compileV128Const adds instructions to perform wazeroir.NewOperationV128Const. + compileV128Const(*wazeroir.UnionOperation) error // compileV128Add adds instructions to perform wazeroir.OperationV128Add. 
- compileV128Add(o wazeroir.OperationV128Add) error + compileV128Add(o *wazeroir.UnionOperation) error // compileV128Sub adds instructions to perform wazeroir.OperationV128Sub. - compileV128Sub(o wazeroir.OperationV128Sub) error + compileV128Sub(o *wazeroir.UnionOperation) error // compileV128Load adds instructions to perform wazeroir.OperationV128Load. - compileV128Load(o wazeroir.OperationV128Load) error + compileV128Load(o *wazeroir.UnionOperation) error // compileV128LoadLane adds instructions to perform wazeroir.OperationV128LoadLane. - compileV128LoadLane(o wazeroir.OperationV128LoadLane) error - // compileV128Store adds instructions to perform wazeroir.OperationV128Store. - compileV128Store(o wazeroir.OperationV128Store) error - // compileV128StoreLane adds instructions to perform wazeroir.OperationV128StoreLane. - compileV128StoreLane(o wazeroir.OperationV128StoreLane) error - // compileV128ExtractLane adds instructions to perform wazeroir.OperationV128ExtractLane. - compileV128ExtractLane(o wazeroir.OperationV128ExtractLane) error - // compileV128ReplaceLane adds instructions to perform wazeroir.OperationV128ReplaceLane. - compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLane) error - // compileV128Splat adds instructions to perform wazeroir.OperationV128Splat. - compileV128Splat(o wazeroir.OperationV128Splat) error - // compileV128Shuffle adds instructions to perform wazeroir.OperationV128Shuffle. - compileV128Shuffle(o wazeroir.OperationV128Shuffle) error + compileV128LoadLane(o *wazeroir.UnionOperation) error + // compileV128Store adds instructions to perform wazeroir.NewOperationV128Store. + compileV128Store(o *wazeroir.UnionOperation) error + // compileV128StoreLane adds instructions to perform wazeroir.NewOperationV128StoreLane. + compileV128StoreLane(o *wazeroir.UnionOperation) error + // compileV128ExtractLane adds instructions to perform wazeroir.NewOperationV128ExtractLane. 
+ compileV128ExtractLane(o *wazeroir.UnionOperation) error + // compileV128ReplaceLane adds instructions to perform wazeroir.NewOperationV128ReplaceLane. + compileV128ReplaceLane(o *wazeroir.UnionOperation) error + // compileV128Splat adds instructions to perform wazeroir.NewOperationV128Splat. + compileV128Splat(o *wazeroir.UnionOperation) error + // compileV128Shuffle adds instructions to perform wazeroir.NewOperationV128Shuffle. + compileV128Shuffle(o *wazeroir.UnionOperation) error // compileV128Swizzle adds instructions to perform wazeroir.OperationV128Swizzle. - compileV128Swizzle(o wazeroir.OperationV128Swizzle) error + compileV128Swizzle(o *wazeroir.UnionOperation) error // compileV128AnyTrue adds instructions to perform wazeroir.OperationV128AnyTrue. - compileV128AnyTrue(o wazeroir.OperationV128AnyTrue) error - // compileV128AllTrue adds instructions to perform wazeroir.OperationV128AllTrue. - compileV128AllTrue(o wazeroir.OperationV128AllTrue) error - // compileV128BitMask adds instructions to perform wazeroir.OperationV128BitMask. - compileV128BitMask(wazeroir.OperationV128BitMask) error + compileV128AnyTrue(o *wazeroir.UnionOperation) error + // compileV128AllTrue adds instructions to perform wazeroir.NewOperationV128AllTrue. + compileV128AllTrue(o *wazeroir.UnionOperation) error + // compileV128BitMask adds instructions to perform wazeroir.NewOperationV128BitMask. + compileV128BitMask(*wazeroir.UnionOperation) error // compileV128And adds instructions to perform wazeroir.OperationV128And. - compileV128And(wazeroir.OperationV128And) error + compileV128And(*wazeroir.UnionOperation) error // compileV128Not adds instructions to perform wazeroir.OperationV128Not. - compileV128Not(wazeroir.OperationV128Not) error + compileV128Not(*wazeroir.UnionOperation) error // compileV128Or adds instructions to perform wazeroir.OperationV128Or. 
- compileV128Or(wazeroir.OperationV128Or) error + compileV128Or(*wazeroir.UnionOperation) error // compileV128Xor adds instructions to perform wazeroir.OperationV128Xor. - compileV128Xor(wazeroir.OperationV128Xor) error + compileV128Xor(*wazeroir.UnionOperation) error // compileV128Bitselect adds instructions to perform wazeroir.OperationV128Bitselect. - compileV128Bitselect(wazeroir.OperationV128Bitselect) error + compileV128Bitselect(*wazeroir.UnionOperation) error // compileV128AndNot adds instructions to perform wazeroir.OperationV128AndNot. - compileV128AndNot(wazeroir.OperationV128AndNot) error - // compileV128Shr adds instructions to perform wazeroir.OperationV128Shr. - compileV128Shr(wazeroir.OperationV128Shr) error - // compileV128Shl adds instructions to perform wazeroir.OperationV128Shl. - compileV128Shl(wazeroir.OperationV128Shl) error - // compileV128Cmp adds instructions to perform wazeroir.OperationV128Cmp. - compileV128Cmp(wazeroir.OperationV128Cmp) error - // compileV128AddSat adds instructions to perform wazeroir.OperationV128AddSat. - compileV128AddSat(wazeroir.OperationV128AddSat) error - // compileV128SubSat adds instructions to perform wazeroir.OperationV128SubSat. - compileV128SubSat(wazeroir.OperationV128SubSat) error - // compileV128Mul adds instructions to perform wazeroir.OperationV128Mul. - compileV128Mul(wazeroir.OperationV128Mul) error - // compileV128Div adds instructions to perform wazeroir.OperationV128Div. - compileV128Div(wazeroir.OperationV128Div) error - // compileV128Neg adds instructions to perform wazeroir.OperationV128Neg. - compileV128Neg(wazeroir.OperationV128Neg) error - // compileV128Sqrt adds instructions to perform wazeroir.OperationV128Sqrt. - compileV128Sqrt(wazeroir.OperationV128Sqrt) error - // compileV128Abs adds instructions to perform wazeroir.OperationV128Abs. - compileV128Abs(wazeroir.OperationV128Abs) error - // compileV128Popcnt adds instructions to perform wazeroir.OperationV128Popcnt. 
- compileV128Popcnt(wazeroir.OperationV128Popcnt) error - // compileV128Min adds instructions to perform wazeroir.OperationV128Min. - compileV128Min(wazeroir.OperationV128Min) error - // compileV128Max adds instructions to perform wazeroir.OperationV128Max. - compileV128Max(wazeroir.OperationV128Max) error - // compileV128AvgrU adds instructions to perform wazeroir.OperationV128AvgrU. - compileV128AvgrU(wazeroir.OperationV128AvgrU) error - // compileV128Pmin adds instructions to perform wazeroir.OperationV128Pmin. - compileV128Pmin(wazeroir.OperationV128Pmin) error - // compileV128Pmax adds instructions to perform wazeroir.OperationV128Pmax. - compileV128Pmax(wazeroir.OperationV128Pmax) error - // compileV128Ceil adds instructions to perform wazeroir.OperationV128Ceil. - compileV128Ceil(wazeroir.OperationV128Ceil) error - // compileV128Floor adds instructions to perform wazeroir.OperationV128Floor. - compileV128Floor(wazeroir.OperationV128Floor) error - // compileV128Trunc adds instructions to perform wazeroir.OperationV128Trunc. - compileV128Trunc(wazeroir.OperationV128Trunc) error - // compileV128Nearest adds instructions to perform wazeroir.OperationV128Nearest. - compileV128Nearest(wazeroir.OperationV128Nearest) error - // compileV128Extend adds instructions to perform wazeroir.OperationV128Extend. - compileV128Extend(wazeroir.OperationV128Extend) error - // compileV128ExtMul adds instructions to perform wazeroir.OperationV128ExtMul. - compileV128ExtMul(wazeroir.OperationV128ExtMul) error + compileV128AndNot(*wazeroir.UnionOperation) error + // compileV128Shr adds instructions to perform wazeroir.NewOperationV128Shr. + compileV128Shr(*wazeroir.UnionOperation) error + // compileV128Shl adds instructions to perform wazeroir.NewOperationV128Shl. + compileV128Shl(*wazeroir.UnionOperation) error + // compileV128Cmp adds instructions to perform wazeroir.NewOperationV128Cmp. 
+ compileV128Cmp(*wazeroir.UnionOperation) error + // compileV128AddSat adds instructions to perform wazeroir.NewOperationV128AddSat. + compileV128AddSat(*wazeroir.UnionOperation) error + // compileV128SubSat adds instructions to perform wazeroir.NewOperationV128SubSat. + compileV128SubSat(*wazeroir.UnionOperation) error + // compileV128Mul adds instructions to perform wazeroir.NewOperationV128Mul. + compileV128Mul(*wazeroir.UnionOperation) error + // compileV128Div adds instructions to perform wazeroir.NewOperationV128Div. + compileV128Div(*wazeroir.UnionOperation) error + // compileV128Neg adds instructions to perform wazeroir.NewOperationV128Neg. + compileV128Neg(*wazeroir.UnionOperation) error + // compileV128Sqrt adds instructions to perform wazeroir.NewOperationV128Sqrt. + compileV128Sqrt(*wazeroir.UnionOperation) error + // compileV128Abs adds instructions to perform wazeroir.NewOperationV128Abs. + compileV128Abs(*wazeroir.UnionOperation) error + // compileV128Popcnt adds instructions to perform wazeroir.NewOperationV128Popcnt. + compileV128Popcnt(*wazeroir.UnionOperation) error + // compileV128Min adds instructions to perform wazeroir.NewOperationV128Min. + compileV128Min(*wazeroir.UnionOperation) error + // compileV128Max adds instructions to perform wazeroir.NewOperationV128Max. + compileV128Max(*wazeroir.UnionOperation) error + // compileV128AvgrU adds instructions to perform wazeroir.NewOperationV128AvgrU. + compileV128AvgrU(*wazeroir.UnionOperation) error + // compileV128Pmin adds instructions to perform wazeroir.NewOperationV128Pmin. + compileV128Pmin(*wazeroir.UnionOperation) error + // compileV128Pmax adds instructions to perform wazeroir.NewOperationV128Pmax. + compileV128Pmax(*wazeroir.UnionOperation) error + // compileV128Ceil adds instructions to perform wazeroir.NewOperationV128Ceil. + compileV128Ceil(*wazeroir.UnionOperation) error + // compileV128Floor adds instructions to perform wazeroir.NewOperationV128Floor. 
+ compileV128Floor(*wazeroir.UnionOperation) error + // compileV128Trunc adds instructions to perform wazeroir.NewOperationV128Trunc. + compileV128Trunc(*wazeroir.UnionOperation) error + // compileV128Nearest adds instructions to perform wazeroir.NewOperationV128Nearest. + compileV128Nearest(*wazeroir.UnionOperation) error + // compileV128Extend adds instructions to perform wazeroir.NewOperationV128Extend. + compileV128Extend(*wazeroir.UnionOperation) error + // compileV128ExtMul adds instructions to perform wazeroir.NewOperationV128ExtMul. + compileV128ExtMul(*wazeroir.UnionOperation) error // compileV128Q15mulrSatS adds instructions to perform wazeroir.OperationV128Q15mulrSatS. - compileV128Q15mulrSatS(wazeroir.OperationV128Q15mulrSatS) error - // compileV128ExtAddPairwise adds instructions to perform wazeroir.OperationV128ExtAddPairwise. - compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAddPairwise) error + compileV128Q15mulrSatS(*wazeroir.UnionOperation) error + // compileV128ExtAddPairwise adds instructions to perform wazeroir.NewOperationV128ExtAddPairwise. + compileV128ExtAddPairwise(o *wazeroir.UnionOperation) error // compileV128FloatPromote adds instructions to perform wazeroir.OperationV128FloatPromote. - compileV128FloatPromote(o wazeroir.OperationV128FloatPromote) error + compileV128FloatPromote(o *wazeroir.UnionOperation) error // compileV128FloatDemote adds instructions to perform wazeroir.OperationV128FloatDemote. - compileV128FloatDemote(o wazeroir.OperationV128FloatDemote) error - // compileV128FConvertFromI adds instructions to perform wazeroir.OperationV128FConvertFromI. - compileV128FConvertFromI(o wazeroir.OperationV128FConvertFromI) error + compileV128FloatDemote(o *wazeroir.UnionOperation) error + // compileV128FConvertFromI adds instructions to perform wazeroir.NewOperationV128FConvertFromI. + compileV128FConvertFromI(o *wazeroir.UnionOperation) error // compileV128Dot adds instructions to perform wazeroir.OperationV128Dot. 
- compileV128Dot(o wazeroir.OperationV128Dot) error - // compileV128Narrow adds instructions to perform wazeroir.OperationV128Narrow. - compileV128Narrow(o wazeroir.OperationV128Narrow) error - // compileV128ITruncSatFromF adds instructions to perform wazeroir.OperationV128ITruncSatFromF. - compileV128ITruncSatFromF(o wazeroir.OperationV128ITruncSatFromF) error + compileV128Dot(o *wazeroir.UnionOperation) error + // compileV128Narrow adds instructions to perform wazeroir.NewOperationV128Narrow. + compileV128Narrow(o *wazeroir.UnionOperation) error + // compileV128ITruncSatFromF adds instructions to perform wazeroir.NewOperationV128ITruncSatFromF. + compileV128ITruncSatFromF(o *wazeroir.UnionOperation) error // compileBuiltinFunctionCheckExitCode adds instructions to perform wazeroir.OperationBuiltinFunctionCheckExitCode. compileBuiltinFunctionCheckExitCode() error diff --git a/internal/engine/compiler/compiler_bench_test.go b/internal/engine/compiler/compiler_bench_test.go index f5e49c0a..b5a0dafb 100644 --- a/internal/engine/compiler/compiler_bench_test.go +++ b/internal/engine/compiler/compiler_bench_test.go @@ -37,11 +37,11 @@ func BenchmarkCompiler_compileMemoryCopy(b *testing.B) { destOffset, sourceOffset = 777, 1 } - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(destOffset))) requireNoError(b, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(sourceOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(sourceOffset))) requireNoError(b, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(size)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(size))) requireNoError(b, err) err = compiler.compileMemoryCopy() requireNoError(b, err) @@ -84,11 +84,11 @@ func BenchmarkCompiler_compileMemoryFill(b *testing.B) { var startOffset uint32 = 100 var value uint8 = 5 - err := 
compiler.compileConstI32(wazeroir.NewOperationConstI32(startOffset)) + err := compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(startOffset))) requireNoError(b, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(value))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(value)))) requireNoError(b, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(size)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(size))) requireNoError(b, err) err = compiler.compileMemoryFill() requireNoError(b, err) diff --git a/internal/engine/compiler/compiler_conditional_save_test.go b/internal/engine/compiler/compiler_conditional_save_test.go index caad721a..3004e966 100644 --- a/internal/engine/compiler/compiler_conditional_save_test.go +++ b/internal/engine/compiler/compiler_conditional_save_test.go @@ -16,25 +16,25 @@ func TestCompiler_conditional_value_saving(t *testing.T) { require.NoError(t, err) // Place the f32 local. - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(1.0)) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(1.0))) require.NoError(t, err) // Generate constants to occupy all the unreserved GP registers. for i := 0; i < len(unreservedGeneralPurposeRegisters); i++ { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(100)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(100))) require.NoError(t, err) } // Pick the f32 floating point local (1.0) twice. // Note that the f32 (function local variable in general) is placed above the call frame. 
- err = compiler.compilePick(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1-callFrameDataSizeInUint64), false)) + err = compiler.compilePick(operationPtr(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1-callFrameDataSizeInUint64), false))) require.NoError(t, err) - err = compiler.compilePick(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1-callFrameDataSizeInUint64), false)) + err = compiler.compilePick(operationPtr(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1-callFrameDataSizeInUint64), false))) require.NoError(t, err) // Generate conditional flag via floating point comparisons. - err = compiler.compileLe(wazeroir.NewOperationLe(wazeroir.SignedTypeFloat32)) + err = compiler.compileLe(operationPtr(wazeroir.NewOperationLe(wazeroir.SignedTypeFloat32))) require.NoError(t, err) // Ensures that we have conditional value at top of stack. @@ -46,7 +46,7 @@ func TestCompiler_conditional_value_saving(t *testing.T) { require.False(t, ok) // We should be able to use the conditional value (an i32 value in Wasm) as an operand for, say, i32.add. 
- err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32))) require.NoError(t, err) err = compiler.compileReturnFunction() diff --git a/internal/engine/compiler/compiler_controlflow_test.go b/internal/engine/compiler/compiler_controlflow_test.go index 1dbb2808..d1d417e2 100644 --- a/internal/engine/compiler/compiler_controlflow_test.go +++ b/internal/engine/compiler/compiler_controlflow_test.go @@ -47,7 +47,7 @@ func TestCompiler_compileHostFunction(t *testing.T) { } func TestCompiler_compileLabel(t *testing.T) { - label := wazeroir.Label{FrameID: 100, Kind: wazeroir.LabelKindContinuation} + label := wazeroir.NewLabel(wazeroir.LabelKindContinuation, 100) for _, expectSkip := range []bool{false, true} { expectSkip := expectSkip t.Run(fmt.Sprintf("expect skip=%v", expectSkip), func(t *testing.T) { @@ -56,12 +56,12 @@ func TestCompiler_compileLabel(t *testing.T) { if expectSkip { // If the initial stack is not set, compileLabel must return skip=true. 
- actual := compiler.compileLabel(wazeroir.OperationLabel{Label: label}) + actual := compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(label))) require.True(t, actual) } else { - err := compiler.compileBr(wazeroir.OperationBr{Target: label}) + err := compiler.compileBr(operationPtr(wazeroir.NewOperationBr(label))) require.NoError(t, err) - actual := compiler.compileLabel(wazeroir.OperationLabel{Label: label}) + actual := compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(label))) require.False(t, actual) } }) @@ -70,8 +70,8 @@ func TestCompiler_compileLabel(t *testing.T) { func TestCompiler_compileBrIf(t *testing.T) { unreachableStatus, thenLabelExitStatus, elseLabelExitStatus := nativeCallStatusCodeUnreachable, nativeCallStatusCodeUnreachable+1, nativeCallStatusCodeUnreachable+2 - thenBranchTarget := wazeroir.BranchTargetDrop{Target: wazeroir.Label{Kind: wazeroir.LabelKindHeader, FrameID: 1}} - elseBranchTarget := wazeroir.BranchTargetDrop{Target: wazeroir.Label{Kind: wazeroir.LabelKindHeader, FrameID: 2}} + thenBranchTarget := wazeroir.BranchTargetDrop{Target: wazeroir.NewLabel(wazeroir.LabelKindHeader, 1)} + elseBranchTarget := wazeroir.BranchTargetDrop{Target: wazeroir.NewLabel(wazeroir.LabelKindHeader, 2)} tests := []struct { name string @@ -84,7 +84,7 @@ func TestCompiler_compileBrIf(t *testing.T) { if shouldGoElse { val = 0 } - err := compiler.compileConstI32(wazeroir.NewOperationConstI32(val)) + err := compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(val))) require.NoError(t, err) }, }, @@ -97,7 +97,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Le on unsigned integer produces the value on COND_LS register. 
- err := compiler.compileLe(wazeroir.NewOperationLe(wazeroir.SignedTypeUint32)) + err := compiler.compileLe(operationPtr(wazeroir.NewOperationLe(wazeroir.SignedTypeUint32))) require.NoError(t, err) }, }, @@ -110,7 +110,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Le on signed integer produces the value on COND_LE register. - err := compiler.compileLe(wazeroir.NewOperationLe(wazeroir.SignedTypeInt32)) + err := compiler.compileLe(operationPtr(wazeroir.NewOperationLe(wazeroir.SignedTypeInt32))) require.NoError(t, err) }, }, @@ -123,7 +123,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Ge on unsigned integer produces the value on COND_HS register. - err := compiler.compileGe(wazeroir.NewOperationGe(wazeroir.SignedTypeUint32)) + err := compiler.compileGe(operationPtr(wazeroir.NewOperationGe(wazeroir.SignedTypeUint32))) require.NoError(t, err) }, }, @@ -136,7 +136,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Ge on signed integer produces the value on COND_GE register. - err := compiler.compileGe(wazeroir.NewOperationGe(wazeroir.SignedTypeInt32)) + err := compiler.compileGe(operationPtr(wazeroir.NewOperationGe(wazeroir.SignedTypeInt32))) require.NoError(t, err) }, }, @@ -149,7 +149,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Gt on unsigned integer produces the value on COND_HI register. - err := compiler.compileGt(wazeroir.NewOperationGt(wazeroir.SignedTypeUint32)) + err := compiler.compileGt(operationPtr(wazeroir.NewOperationGt(wazeroir.SignedTypeUint32))) require.NoError(t, err) }, }, @@ -162,7 +162,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Gt on signed integer produces the value on COND_GT register. 
- err := compiler.compileGt(wazeroir.NewOperationGt(wazeroir.SignedTypeInt32)) + err := compiler.compileGt(operationPtr(wazeroir.NewOperationGt(wazeroir.SignedTypeInt32))) require.NoError(t, err) }, }, @@ -175,7 +175,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Lt on unsigned integer produces the value on COND_LO register. - err := compiler.compileLt(wazeroir.NewOperationLt(wazeroir.SignedTypeUint32)) + err := compiler.compileLt(operationPtr(wazeroir.NewOperationLt(wazeroir.SignedTypeUint32))) require.NoError(t, err) }, }, @@ -188,7 +188,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoInt32Consts(t, x1, x2, compiler) // Lt on signed integer produces the value on COND_LT register. - err := compiler.compileLt(wazeroir.NewOperationLt(wazeroir.SignedTypeInt32)) + err := compiler.compileLt(operationPtr(wazeroir.NewOperationLt(wazeroir.SignedTypeInt32))) require.NoError(t, err) }, }, @@ -201,7 +201,7 @@ func TestCompiler_compileBrIf(t *testing.T) { } requirePushTwoFloat32Consts(t, x1, x2, compiler) // Lt on floats produces the value on COND_MI register. 
- err := compiler.compileLt(wazeroir.NewOperationLt(wazeroir.SignedTypeFloat32)) + err := compiler.compileLt(operationPtr(wazeroir.NewOperationLt(wazeroir.SignedTypeFloat32))) require.NoError(t, err) }, }, @@ -213,7 +213,7 @@ func TestCompiler_compileBrIf(t *testing.T) { x2++ } requirePushTwoInt32Consts(t, x1, x2, compiler) - err := compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32)) + err := compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32))) require.NoError(t, err) }, }, @@ -225,7 +225,7 @@ func TestCompiler_compileBrIf(t *testing.T) { x2 = x1 } requirePushTwoInt32Consts(t, x1, x2, compiler) - err := compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32)) + err := compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32))) require.NoError(t, err) }, }, @@ -245,17 +245,17 @@ func TestCompiler_compileBrIf(t *testing.T) { tc.setupFunc(t, compiler, shouldGoToElse) requireRuntimeLocationStackPointerEqual(t, uint64(1), compiler) - err = compiler.compileBrIf(wazeroir.OperationBrIf{Then: thenBranchTarget, Else: elseBranchTarget}) + err = compiler.compileBrIf(operationPtr(wazeroir.NewOperationBrIf(thenBranchTarget, elseBranchTarget))) require.NoError(t, err) compiler.compileExitFromNativeCode(unreachableStatus) // Emit code for .then label. - skip := compiler.compileLabel(wazeroir.OperationLabel{Label: thenBranchTarget.Target}) + skip := compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(thenBranchTarget.Target))) require.False(t, skip) compiler.compileExitFromNativeCode(thenLabelExitStatus) // Emit code for .else label. 
- skip = compiler.compileLabel(wazeroir.OperationLabel{Label: elseBranchTarget.Target}) + skip = compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(elseBranchTarget.Target))) require.False(t, skip) compiler.compileExitFromNativeCode(elseLabelExitStatus) @@ -291,11 +291,11 @@ func TestCompiler_compileBrTable(t *testing.T) { requireRunAndExpectedValueReturned := func(t *testing.T, env *compilerEnv, c compilerImpl, expValue uint32) { // Emit code for each label which returns the frame ID. for returnValue := uint32(0); returnValue < 7; returnValue++ { - label := wazeroir.Label{Kind: wazeroir.LabelKindHeader, FrameID: returnValue} - err := c.compileBr(wazeroir.OperationBr{Target: label}) + label := wazeroir.NewLabel(wazeroir.LabelKindHeader, returnValue) + err := c.compileBr(operationPtr(wazeroir.NewOperationBr(label))) require.NoError(t, err) - _ = c.compileLabel(wazeroir.OperationLabel{Label: label}) - _ = c.compileConstI32(wazeroir.NewOperationConstI32(label.FrameID)) + _ = c.compileLabel(operationPtr(wazeroir.NewOperationLabel(label))) + _ = c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(label.FrameID())))) err = c.compileReturnFunction() require.NoError(t, err) } @@ -310,120 +310,106 @@ func TestCompiler_compileBrTable(t *testing.T) { require.Equal(t, expValue, env.stackTopAsUint32()) } - getBranchTargetDropFromFrameID := func(frameid uint32) *wazeroir.BranchTargetDrop { - return &wazeroir.BranchTargetDrop{ - Target: wazeroir.Label{FrameID: frameid, Kind: wazeroir.LabelKindHeader}, - } + getBranchLabelFromFrameID := func(frameid uint32) uint64 { + return uint64(wazeroir.NewLabel(wazeroir.LabelKindHeader, frameid)) } tests := []struct { name string index int64 - o wazeroir.OperationBrTable + o *wazeroir.UnionOperation expectedValue uint32 }{ { name: "only default with index 0", - o: wazeroir.OperationBrTable{Default: getBranchTargetDropFromFrameID(6)}, + o: 
operationPtr(wazeroir.NewOperationBrTable([]uint64{getBranchLabelFromFrameID(6)}, nil)), index: 0, expectedValue: 6, }, { name: "only default with index 100", - o: wazeroir.OperationBrTable{Default: getBranchTargetDropFromFrameID(6)}, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{getBranchLabelFromFrameID(6)}, nil)), index: 100, expectedValue: 6, }, { name: "select default with targets and good index", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - }, - Default: getBranchTargetDropFromFrameID(6), - }, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(6), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + }, nil)), index: 3, expectedValue: 6, }, { name: "select default with targets and huge index", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - }, - Default: getBranchTargetDropFromFrameID(6), + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(6), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), }, + nil, + )), index: 100000, expectedValue: 6, }, { name: "select first with two targets", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - }, - Default: getBranchTargetDropFromFrameID(5), - }, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(5), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + }, nil)), index: 0, expectedValue: 1, }, { name: "select last with two targets", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - }, - Default: getBranchTargetDropFromFrameID(6), - }, + o: 
operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(6), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + }, nil)), index: 1, expectedValue: 2, }, { name: "select first with five targets", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - getBranchTargetDropFromFrameID(3), - getBranchTargetDropFromFrameID(4), - getBranchTargetDropFromFrameID(5), - }, - Default: getBranchTargetDropFromFrameID(5), - }, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(5), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + getBranchLabelFromFrameID(3), + getBranchLabelFromFrameID(4), + getBranchLabelFromFrameID(5), + }, nil)), index: 0, expectedValue: 1, }, { name: "select middle with five targets", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - getBranchTargetDropFromFrameID(3), - getBranchTargetDropFromFrameID(4), - getBranchTargetDropFromFrameID(5), - }, - Default: getBranchTargetDropFromFrameID(5), - }, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(5), // default + getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + getBranchLabelFromFrameID(3), + getBranchLabelFromFrameID(4), + getBranchLabelFromFrameID(5), + }, nil)), index: 2, expectedValue: 3, }, { name: "select last with five targets", - o: wazeroir.OperationBrTable{ - Targets: []*wazeroir.BranchTargetDrop{ - getBranchTargetDropFromFrameID(1), - getBranchTargetDropFromFrameID(2), - getBranchTargetDropFromFrameID(3), - getBranchTargetDropFromFrameID(4), - getBranchTargetDropFromFrameID(5), - }, - Default: getBranchTargetDropFromFrameID(5), - }, + o: operationPtr(wazeroir.NewOperationBrTable([]uint64{ + getBranchLabelFromFrameID(5), // default + 
getBranchLabelFromFrameID(1), + getBranchLabelFromFrameID(2), + getBranchLabelFromFrameID(3), + getBranchLabelFromFrameID(4), + getBranchLabelFromFrameID(5), + }, nil)), index: 4, expectedValue: 5, }, @@ -438,7 +424,7 @@ func TestCompiler_compileBrTable(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(tc.index))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(tc.index)))) require.NoError(t, err) err = compiler.compileBrTable(tc.o) @@ -452,16 +438,16 @@ func TestCompiler_compileBrTable(t *testing.T) { } func requirePushTwoInt32Consts(t *testing.T, x1, x2 uint32, compiler compilerImpl) { - err := compiler.compileConstI32(wazeroir.NewOperationConstI32(x1)) + err := compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(x1))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(x2)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(x2))) require.NoError(t, err) } func requirePushTwoFloat32Consts(t *testing.T, x1, x2 float32, compiler compilerImpl) { - err := compiler.compileConstF32(wazeroir.NewOperationConstF32(x1)) + err := compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(x1))) require.NoError(t, err) - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(x2)) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(x2))) require.NoError(t, err) } @@ -473,7 +459,7 @@ func TestCompiler_compileBr(t *testing.T) { require.NoError(t, err) // Branch into nil label is interpreted as return. See BranchTarget.IsReturnTarget - err = compiler.compileBr(wazeroir.OperationBr{Target: wazeroir.Label{Kind: wazeroir.LabelKindReturn}}) + err = compiler.compileBr(operationPtr(wazeroir.NewOperationBr(wazeroir.NewLabel(wazeroir.LabelKindReturn, 0)))) require.NoError(t, err) // Compile and execute the code under test. 
@@ -491,34 +477,34 @@ func TestCompiler_compileBr(t *testing.T) { require.NoError(t, err) // Emit the forward br, meaning that handle Br instruction where the target label hasn't been compiled yet. - forwardLabel := wazeroir.Label{Kind: wazeroir.LabelKindHeader, FrameID: 0} - err = compiler.compileBr(wazeroir.OperationBr{Target: forwardLabel}) + forwardLabel := wazeroir.NewLabel(wazeroir.LabelKindHeader, 0) + err = compiler.compileBr(operationPtr(wazeroir.NewOperationBr(forwardLabel))) require.NoError(t, err) // We must not reach the code after Br, so emit the code exiting with Unreachable status. compiler.compileExitFromNativeCode(nativeCallStatusCodeUnreachable) require.NoError(t, err) - exitLabel := wazeroir.Label{Kind: wazeroir.LabelKindHeader, FrameID: 1} - err = compiler.compileBr(wazeroir.OperationBr{Target: exitLabel}) + exitLabel := wazeroir.NewLabel(wazeroir.LabelKindHeader, 1) + err = compiler.compileBr(operationPtr(wazeroir.NewOperationBr(exitLabel))) require.NoError(t, err) // Emit code for the exitLabel. - skip := compiler.compileLabel(wazeroir.OperationLabel{Label: exitLabel}) + skip := compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(exitLabel))) require.False(t, skip) compiler.compileExitFromNativeCode(nativeCallStatusCodeReturned) require.NoError(t, err) // Emit code for the forwardLabel. - skip = compiler.compileLabel(wazeroir.OperationLabel{Label: forwardLabel}) + skip = compiler.compileLabel(operationPtr(wazeroir.NewOperationLabel(forwardLabel))) require.False(t, skip) - err = compiler.compileBr(wazeroir.OperationBr{Target: exitLabel}) + err = compiler.compileBr(operationPtr(wazeroir.NewOperationBr(exitLabel))) require.NoError(t, err) code, _, err := compiler.compile() require.NoError(t, err) - // The generated code looks like this: + // The generated code looks like this:) // // ... 
code from compilePreamble() // br .forwardLabel @@ -547,10 +533,10 @@ func TestCompiler_compileCallIndirect(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - targetOperation := wazeroir.NewOperationCallIndirect(0, 0) + targetOperation := operationPtr(wazeroir.NewOperationCallIndirect(0, 0)) // Place the offset value. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(10)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(10))) require.NoError(t, err) err = compiler.compileCallIndirect(targetOperation) @@ -577,8 +563,8 @@ func TestCompiler_compileCallIndirect(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - targetOperation := wazeroir.NewOperationCallIndirect(0, 0) - targetOffset := wazeroir.NewOperationConstI32(uint32(0)) + targetOperation := operationPtr(wazeroir.NewOperationCallIndirect(0, 0)) + targetOffset := operationPtr(wazeroir.NewOperationConstI32(uint32(0))) // and the typeID doesn't match the table[targetOffset]'s type ID. table := make([]wasm.Reference, 10) @@ -613,8 +599,8 @@ func TestCompiler_compileCallIndirect(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - targetOperation := wazeroir.NewOperationCallIndirect(0, 0) - targetOffset := wazeroir.NewOperationConstI32(uint32(0)) + targetOperation := operationPtr(wazeroir.NewOperationCallIndirect(0, 0)) + targetOffset := operationPtr(wazeroir.NewOperationConstI32(uint32(0))) env.module().TypeIDs = []wasm.FunctionTypeID{1000} // Ensure that the module instance has the type information for targetOperation.TypeIndex, // and the typeID doesn't match the table[targetOffset]'s type ID. 
@@ -650,7 +636,7 @@ func TestCompiler_compileCallIndirect(t *testing.T) { } const typeIndex = 0 targetTypeID := wasm.FunctionTypeID(10) - operation := wazeroir.NewOperationCallIndirect(typeIndex, 0) + operation := operationPtr(wazeroir.NewOperationCallIndirect(typeIndex, 0)) table := make([]wasm.Reference, 10) env := newCompilerEnvironment() @@ -674,12 +660,12 @@ func TestCompiler_compileCallIndirect(t *testing.T) { }) err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(expectedReturnValue)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(expectedReturnValue))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) // The function result value must be set at the bottom of the stack. - err = compiler.compileSet(wazeroir.NewOperationSet(int(compiler.runtimeValueLocationStack().sp-1), false)) + err = compiler.compileSet(operationPtr(wazeroir.NewOperationSet(int(compiler.runtimeValueLocationStack().sp-1), false))) require.NoError(t, err) err = compiler.compileReturnFunction() require.NoError(t, err) @@ -713,7 +699,7 @@ func TestCompiler_compileCallIndirect(t *testing.T) { require.NoError(t, err) // Place the offset value. Here we try calling a function of functionaddr == table[i].FunctionIndex. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(i))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(i)))) require.NoError(t, err) // At this point, we should have one item (offset value) on the stack. @@ -750,7 +736,7 @@ func TestCompiler_callIndirect_largeTypeIndex(t *testing.T) { // Ensure that the module instance has the type information for targetOperation.TypeIndex, // and the typeID matches the table[targetOffset]'s type ID. 
const typeIndex, typeID = 12345, 0 - operation := wazeroir.NewOperationCallIndirect(typeIndex, 0) + operation := operationPtr(wazeroir.NewOperationCallIndirect(typeIndex, 0)) env.module().TypeIDs = make([]wasm.FunctionTypeID, typeIndex+1) env.module().TypeIDs[typeIndex] = typeID env.module().Engine = &moduleEngine{functions: []function{}} @@ -786,7 +772,7 @@ func TestCompiler_callIndirect_largeTypeIndex(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) require.NoError(t, compiler.compileCallIndirect(operation)) @@ -823,16 +809,16 @@ func TestCompiler_compileCall(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(addTargetValue)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(addTargetValue))) require.NoError(t, err) // Picks the function argument placed at the bottom of the stack. - err = compiler.compilePick(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1), false)) + err = compiler.compilePick(operationPtr(wazeroir.NewOperationPick(int(compiler.runtimeValueLocationStack().sp-1), false))) require.NoError(t, err) // Adds the const to the picked value. - err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32))) require.NoError(t, err) // Then store the added result into the bottom of the stack (which is treated as the result of the function). 
- err = compiler.compileSet(wazeroir.NewOperationSet(int(compiler.runtimeValueLocationStack().sp-1), false)) + err = compiler.compileSet(operationPtr(wazeroir.NewOperationSet(int(compiler.runtimeValueLocationStack().sp-1), false))) require.NoError(t, err) err = compiler.compileReturnFunction() @@ -859,14 +845,14 @@ func TestCompiler_compileCall(t *testing.T) { const initialValue = 100 expectedValue += initialValue - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(1234)) // Dummy value so the base pointer would be non-trivial for callees. + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(1234))) // Dummy value so the base pointer would be non-trivial for callees. require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(initialValue)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(initialValue))) require.NoError(t, err) // Call all the built functions. for i := 0; i < numCalls; i++ { - err = compiler.compileCall(wazeroir.NewOperationCall(1)) + err = compiler.compileCall(operationPtr(wazeroir.NewOperationCall(1))) require.NoError(t, err) } diff --git a/internal/engine/compiler/compiler_conversion_test.go b/internal/engine/compiler/compiler_conversion_test.go index 5d101513..8dce5fe3 100644 --- a/internal/engine/compiler/compiler_conversion_test.go +++ b/internal/engine/compiler/compiler_conversion_test.go @@ -43,14 +43,14 @@ func TestCompiler_compileReinterpret(t *testing.T) { case wazeroir.OperationKindF32ReinterpretFromI32: is32Bit = true if !originOnStack { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) require.NoError(t, err) } err = compiler.compileF32ReinterpretFromI32() require.NoError(t, err) case wazeroir.OperationKindF64ReinterpretFromI64: if !originOnStack { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = 
compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) require.NoError(t, err) } err = compiler.compileF64ReinterpretFromI64() @@ -58,14 +58,14 @@ func TestCompiler_compileReinterpret(t *testing.T) { case wazeroir.OperationKindI32ReinterpretFromF32: is32Bit = true if !originOnStack { - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v)))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v))))) require.NoError(t, err) } err = compiler.compileI32ReinterpretFromF32() require.NoError(t, err) case wazeroir.OperationKindI64ReinterpretFromF64: if !originOnStack { - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(v))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(v)))) require.NoError(t, err) } err = compiler.compileI64ReinterpretFromF64() @@ -111,10 +111,10 @@ func TestCompiler_compileExtend(t *testing.T) { require.NoError(t, err) // Setup the promote target. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(v)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(v))) require.NoError(t, err) - err = compiler.compileExtend(wazeroir.OperationExtend{Signed: signed}) + err = compiler.compileExtend(operationPtr(wazeroir.NewOperationExtend(signed))) require.NoError(t, err) err = compiler.compileReturnFunction() @@ -194,15 +194,15 @@ func TestCompiler_compileITruncFromF(t *testing.T) { // Setup the conversion target. 
if tc.inputType == wazeroir.Float32 { - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(float32(v))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(float32(v)))) } else { - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(v)) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(v))) } require.NoError(t, err) - err = compiler.compileITruncFromF(wazeroir.OperationITruncFromF{ - InputType: tc.inputType, OutputType: tc.outputType, NonTrapping: tc.nonTrapping, - }) + err = compiler.compileITruncFromF(operationPtr(wazeroir.NewOperationITruncFromF( + tc.inputType, tc.outputType, tc.nonTrapping, + ))) require.NoError(t, err) err = compiler.compileReturnFunction() @@ -393,15 +393,15 @@ func TestCompiler_compileFConvertFromI(t *testing.T) { // Setup the conversion target. if tc.inputType == wazeroir.SignedInt32 || tc.inputType == wazeroir.SignedUint32 { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) } else { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(uint64(v))) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(uint64(v)))) } require.NoError(t, err) - err = compiler.compileFConvertFromI(wazeroir.OperationFConvertFromI{ - InputType: tc.inputType, OutputType: tc.outputType, - }) + err = compiler.compileFConvertFromI(operationPtr(wazeroir.NewOperationFConvertFromI( + tc.inputType, tc.outputType, + ))) require.NoError(t, err) err = compiler.compileReturnFunction() @@ -469,7 +469,7 @@ func TestCompiler_compileF64PromoteFromF32(t *testing.T) { require.NoError(t, err) // Setup the promote target. 
- err = compiler.compileConstF32(wazeroir.NewOperationConstF32(v)) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(v))) require.NoError(t, err) err = compiler.compileF64PromoteFromF32() @@ -515,7 +515,7 @@ func TestCompiler_compileF32DemoteFromF64(t *testing.T) { require.NoError(t, err) // Setup the demote target. - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(v)) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(v))) require.NoError(t, err) err = compiler.compileF32DemoteFromF64() diff --git a/internal/engine/compiler/compiler_global_test.go b/internal/engine/compiler/compiler_global_test.go index 0bbbf3f4..09c3a8ff 100644 --- a/internal/engine/compiler/compiler_global_test.go +++ b/internal/engine/compiler/compiler_global_test.go @@ -28,7 +28,7 @@ func TestCompiler_compileGlobalGet(t *testing.T) { // Emit the code. err := compiler.compilePreamble() require.NoError(t, err) - op := wazeroir.NewOperationGlobalGet(1) + op := operationPtr(wazeroir.NewOperationGlobalGet(1)) err = compiler.compileGlobalGet(op) require.NoError(t, err) @@ -75,7 +75,7 @@ func TestCompiler_compileGlobalGet_v128(t *testing.T) { // Emit the code. 
err := compiler.compilePreamble() require.NoError(t, err) - op := wazeroir.NewOperationGlobalGet(1) + op := operationPtr(wazeroir.NewOperationGlobalGet(1)) err = compiler.compileGlobalGet(op) require.NoError(t, err) @@ -141,7 +141,7 @@ func TestCompiler_compileGlobalSet(t *testing.T) { env.stack()[loc.stackPointer] = valueToSet const index = 1 - op := wazeroir.NewOperationGlobalSet(index) + op := operationPtr(wazeroir.NewOperationGlobalSet(index)) err = compiler.compileGlobalSet(op) requireRuntimeLocationStackPointerEqual(t, 0, compiler) @@ -189,7 +189,7 @@ func TestCompiler_compileGlobalSet_v128(t *testing.T) { env.stack()[hi.stackPointer] = valueToSetHi const index = 1 - op := wazeroir.NewOperationGlobalSet(index) + op := operationPtr(wazeroir.NewOperationGlobalSet(index)) err = compiler.compileGlobalSet(op) requireRuntimeLocationStackPointerEqual(t, 0, compiler) require.NoError(t, err) diff --git a/internal/engine/compiler/compiler_memory_test.go b/internal/engine/compiler/compiler_memory_test.go index 55ca5d7b..a4a74ec1 100644 --- a/internal/engine/compiler/compiler_memory_test.go +++ b/internal/engine/compiler/compiler_memory_test.go @@ -24,7 +24,7 @@ func TestCompiler_compileMemoryGrow(t *testing.T) { // Emit arbitrary code after MemoryGrow returned so that we can verify // that the code can set the return address properly. 
const expValue uint32 = 100 - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(expValue)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(expValue))) require.NoError(t, err) err = compiler.compileReturnFunction() require.NoError(t, err) @@ -91,7 +91,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i32.load", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeI32, arg)) + err := compiler.compileLoad(operationPtr(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeI32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -101,7 +101,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeI64, arg)) + err := compiler.compileLoad(operationPtr(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeI64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -111,7 +111,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "f32.load", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF32, arg)) + err := compiler.compileLoad(operationPtr(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -122,7 +122,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "f64.load", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF64, arg)) + err := compiler.compileLoad(operationPtr(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, 
loadedValueAsUint64 uint64) { @@ -133,7 +133,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i32.load8s", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad8(wazeroir.NewOperationLoad8(wazeroir.SignedInt32, arg)) + err := compiler.compileLoad8(operationPtr(wazeroir.NewOperationLoad8(wazeroir.SignedInt32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -143,7 +143,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i32.load8u", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad8(wazeroir.NewOperationLoad8(wazeroir.SignedUint32, arg)) + err := compiler.compileLoad8(operationPtr(wazeroir.NewOperationLoad8(wazeroir.SignedUint32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -153,7 +153,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load8s", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad8(wazeroir.NewOperationLoad8(wazeroir.SignedInt64, arg)) + err := compiler.compileLoad8(operationPtr(wazeroir.NewOperationLoad8(wazeroir.SignedInt64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -163,7 +163,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load8u", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad8(wazeroir.NewOperationLoad8(wazeroir.SignedUint64, arg)) + err := compiler.compileLoad8(operationPtr(wazeroir.NewOperationLoad8(wazeroir.SignedUint64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -173,7 +173,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i32.load16s", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad16(wazeroir.NewOperationLoad16(wazeroir.SignedInt32, 
arg)) + err := compiler.compileLoad16(operationPtr(wazeroir.NewOperationLoad16(wazeroir.SignedInt32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -183,7 +183,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i32.load16u", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad16(wazeroir.NewOperationLoad16(wazeroir.SignedUint32, arg)) + err := compiler.compileLoad16(operationPtr(wazeroir.NewOperationLoad16(wazeroir.SignedUint32, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -193,7 +193,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load16s", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad16(wazeroir.NewOperationLoad16(wazeroir.SignedInt64, arg)) + err := compiler.compileLoad16(operationPtr(wazeroir.NewOperationLoad16(wazeroir.SignedInt64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -203,7 +203,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load16u", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad16(wazeroir.NewOperationLoad16(wazeroir.SignedUint64, arg)) + err := compiler.compileLoad16(operationPtr(wazeroir.NewOperationLoad16(wazeroir.SignedUint64, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -213,7 +213,7 @@ func TestCompiler_compileLoad(t *testing.T) { { name: "i64.load32s", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad32(wazeroir.NewOperationLoad32(true, arg)) + err := compiler.compileLoad32(operationPtr(wazeroir.NewOperationLoad32(true, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -223,7 +223,7 @@ func TestCompiler_compileLoad(t *testing.T) { { 
name: "i64.load32u", operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileLoad32(wazeroir.NewOperationLoad32(false, arg)) + err := compiler.compileLoad32(operationPtr(wazeroir.NewOperationLoad32(false, arg))) require.NoError(t, err) }, loadedValueVerifyFn: func(t *testing.T, loadedValueAsUint64 uint64) { @@ -244,7 +244,7 @@ func TestCompiler_compileLoad(t *testing.T) { binary.LittleEndian.PutUint64(env.memory()[offset:], loadTargetValue) // Before load operation, we must push the base offset value. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(baseOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(baseOffset))) require.NoError(t, err) tc.operationSetupFn(t, compiler) @@ -292,7 +292,7 @@ func TestCompiler_compileStore(t *testing.T) { name: "i32.store", targetSizeInBytes: 32 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore(wazeroir.NewOperationStore(wazeroir.UnsignedTypeI32, arg)) + err := compiler.compileStore(operationPtr(wazeroir.NewOperationStore(wazeroir.UnsignedTypeI32, arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -304,7 +304,7 @@ func TestCompiler_compileStore(t *testing.T) { isFloatTarget: true, targetSizeInBytes: 32 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore(wazeroir.NewOperationStore(wazeroir.UnsignedTypeF32, arg)) + err := compiler.compileStore(operationPtr(wazeroir.NewOperationStore(wazeroir.UnsignedTypeF32, arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -315,7 +315,7 @@ func TestCompiler_compileStore(t *testing.T) { name: "i64.store", targetSizeInBytes: 64 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore(wazeroir.NewOperationStore(wazeroir.UnsignedTypeI64, arg)) + err := 
compiler.compileStore(operationPtr(wazeroir.NewOperationStore(wazeroir.UnsignedTypeI64, arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -327,7 +327,7 @@ func TestCompiler_compileStore(t *testing.T) { isFloatTarget: true, targetSizeInBytes: 64 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore(wazeroir.NewOperationStore(wazeroir.UnsignedTypeF64, arg)) + err := compiler.compileStore(operationPtr(wazeroir.NewOperationStore(wazeroir.UnsignedTypeF64, arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -338,7 +338,7 @@ func TestCompiler_compileStore(t *testing.T) { name: "store8", targetSizeInBytes: 1, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore8(wazeroir.NewOperationStore8(arg)) + err := compiler.compileStore8(operationPtr(wazeroir.NewOperationStore8(arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -349,7 +349,7 @@ func TestCompiler_compileStore(t *testing.T) { name: "store16", targetSizeInBytes: 16 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore16(wazeroir.NewOperationStore16(arg)) + err := compiler.compileStore16(operationPtr(wazeroir.NewOperationStore16(arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -360,7 +360,7 @@ func TestCompiler_compileStore(t *testing.T) { name: "store32", targetSizeInBytes: 32 / 8, operationSetupFn: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileStore32(wazeroir.NewOperationStore32(arg)) + err := compiler.compileStore32(operationPtr(wazeroir.NewOperationStore32(arg))) require.NoError(t, err) }, storedValueVerifyFn: func(t *testing.T, mem []byte) { @@ -379,12 +379,12 @@ func TestCompiler_compileStore(t *testing.T) { require.NoError(t, err) // Before store operations, we must push the base offset, and the store 
target values. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(baseOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(baseOffset))) require.NoError(t, err) if tc.isFloatTarget { - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(storeTargetValue))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(storeTargetValue)))) } else { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(storeTargetValue)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(storeTargetValue))) } require.NoError(t, err) @@ -443,20 +443,20 @@ func TestCompiler_MemoryOutOfBounds(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(base)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(base))) require.NoError(t, err) arg := wazeroir.MemoryArg{Offset: offset} switch targetSizeInByte { case 1: - err = compiler.compileLoad8(wazeroir.NewOperationLoad8(wazeroir.SignedInt32, arg)) + err = compiler.compileLoad8(operationPtr(wazeroir.NewOperationLoad8(wazeroir.SignedInt32, arg))) case 2: - err = compiler.compileLoad16(wazeroir.NewOperationLoad16(wazeroir.SignedInt32, arg)) + err = compiler.compileLoad16(operationPtr(wazeroir.NewOperationLoad16(wazeroir.SignedInt32, arg))) case 4: - err = compiler.compileLoad32(wazeroir.NewOperationLoad32(false, arg)) + err = compiler.compileLoad32(operationPtr(wazeroir.NewOperationLoad32(false, arg))) case 8: - err = compiler.compileLoad(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF64, arg)) + err = compiler.compileLoad(operationPtr(wazeroir.NewOperationLoad(wazeroir.UnsignedTypeF64, arg))) default: t.Fail() } diff --git a/internal/engine/compiler/compiler_numeric_test.go b/internal/engine/compiler/compiler_numeric_test.go index 9b8d9c58..76f0aec6 100644 --- 
a/internal/engine/compiler/compiler_numeric_test.go +++ b/internal/engine/compiler/compiler_numeric_test.go @@ -44,15 +44,15 @@ func TestCompiler_compileConsts(t *testing.T) { switch op { case wazeroir.OperationKindConstI32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(val))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(val)))) case wazeroir.OperationKindConstI64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(val)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(val))) case wazeroir.OperationKindConstF32: - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(val)))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(val))))) case wazeroir.OperationKindConstF64: - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(val))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(val)))) case wazeroir.OperationKindV128Const: - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: val, Hi: ^val}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(val, ^val))) } require.NoError(t, err) @@ -152,13 +152,13 @@ func TestCompiler_compile_Add_Sub_Mul(t *testing.T) { for _, v := range []uint64{x1, x2} { switch unsignedType { case wazeroir.UnsignedTypeI32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) case wazeroir.UnsignedTypeI64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) case wazeroir.UnsignedTypeF32: - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v)))) + err = 
compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v))))) case wazeroir.UnsignedTypeF64: - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(v))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(v)))) } require.NoError(t, err) } @@ -169,11 +169,11 @@ func TestCompiler_compile_Add_Sub_Mul(t *testing.T) { // Emit the operation. switch kind { case wazeroir.OperationKindAdd: - err = compiler.compileAdd(wazeroir.NewOperationAdd(unsignedType)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(unsignedType))) case wazeroir.OperationKindSub: - err = compiler.compileSub(wazeroir.NewOperationSub(unsignedType)) + err = compiler.compileSub(operationPtr(wazeroir.NewOperationSub(unsignedType))) case wazeroir.OperationKindMul: - err = compiler.compileMul(wazeroir.NewOperationMul(unsignedType)) + err = compiler.compileMul(operationPtr(wazeroir.NewOperationMul(unsignedType))) } require.NoError(t, err) @@ -323,16 +323,16 @@ func TestCompiler_compile_And_Or_Xor_Shl_Rotl_Rotr(t *testing.T) { var x1Location *runtimeValueLocation switch unsignedInt { case wazeroir.UnsignedInt32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(x1))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(x1)))) require.NoError(t, err) x1Location = compiler.runtimeValueLocationStack().peek() - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(x2)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(x2))) require.NoError(t, err) case wazeroir.UnsignedInt64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(x1)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(x1))) require.NoError(t, err) x1Location = compiler.runtimeValueLocationStack().peek() - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(x2)) + err = 
compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(x2))) require.NoError(t, err) } @@ -346,17 +346,17 @@ func TestCompiler_compile_And_Or_Xor_Shl_Rotl_Rotr(t *testing.T) { // Emit the operation. switch kind { case wazeroir.OperationKindAnd: - err = compiler.compileAnd(wazeroir.NewOperationAnd(unsignedInt)) + err = compiler.compileAnd(operationPtr(wazeroir.NewOperationAnd(unsignedInt))) case wazeroir.OperationKindOr: - err = compiler.compileOr(wazeroir.NewOperationOr(unsignedInt)) + err = compiler.compileOr(operationPtr(wazeroir.NewOperationOr(unsignedInt))) case wazeroir.OperationKindXor: - err = compiler.compileXor(wazeroir.NewOperationXor(unsignedInt)) + err = compiler.compileXor(operationPtr(wazeroir.NewOperationXor(unsignedInt))) case wazeroir.OperationKindShl: - err = compiler.compileShl(wazeroir.NewOperationShl(unsignedInt)) + err = compiler.compileShl(operationPtr(wazeroir.NewOperationShl(unsignedInt))) case wazeroir.OperationKindRotl: - err = compiler.compileRotl(wazeroir.NewOperationRotl(unsignedInt)) + err = compiler.compileRotl(operationPtr(wazeroir.NewOperationRotl(unsignedInt))) case wazeroir.OperationKindRotr: - err = compiler.compileRotr(wazeroir.NewOperationRotr(unsignedInt)) + err = compiler.compileRotr(operationPtr(wazeroir.NewOperationRotr(unsignedInt))) } require.NoError(t, err) @@ -464,13 +464,13 @@ func TestCompiler_compileShr(t *testing.T) { for _, v := range []uint64{x1, x2} { switch signedInt { case wazeroir.SignedInt32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(int32(v)))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(int32(v))))) case wazeroir.SignedInt64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) case wazeroir.SignedUint32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = 
compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) case wazeroir.SignedUint64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) } require.NoError(t, err) } @@ -479,7 +479,7 @@ func TestCompiler_compileShr(t *testing.T) { requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) // Emit the operation. - err = compiler.compileShr(wazeroir.NewOperationShr(signedInt)) + err = compiler.compileShr(operationPtr(wazeroir.NewOperationShr(signedInt))) require.NoError(t, err) // We consumed two values, but push the result back. @@ -593,15 +593,15 @@ func TestCompiler_compile_Le_Lt_Gt_Ge_Eq_Eqz_Ne(t *testing.T) { for _, v := range []uint64{x1, x2} { switch signedType { case wazeroir.SignedTypeUint32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) case wazeroir.SignedTypeInt32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(int32(v)))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(int32(v))))) case wazeroir.SignedTypeInt64, wazeroir.SignedTypeUint64: - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) case wazeroir.SignedTypeFloat32: - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v)))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v))))) case wazeroir.SignedTypeFloat64: - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(v))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(v)))) } require.NoError(t, err) } @@ -618,44 +618,44 @@ func TestCompiler_compile_Le_Lt_Gt_Ge_Eq_Eqz_Ne(t *testing.T) { // Emit 
the operation. switch kind { case wazeroir.OperationKindLe: - err = compiler.compileLe(wazeroir.NewOperationLe(signedType)) + err = compiler.compileLe(operationPtr(wazeroir.NewOperationLe(signedType))) case wazeroir.OperationKindLt: - err = compiler.compileLt(wazeroir.NewOperationLt(signedType)) + err = compiler.compileLt(operationPtr(wazeroir.NewOperationLt(signedType))) case wazeroir.OperationKindGe: - err = compiler.compileGe(wazeroir.NewOperationGe(signedType)) + err = compiler.compileGe(operationPtr(wazeroir.NewOperationGe(signedType))) case wazeroir.OperationKindGt: - err = compiler.compileGt(wazeroir.NewOperationGt(signedType)) + err = compiler.compileGt(operationPtr(wazeroir.NewOperationGt(signedType))) case wazeroir.OperationKindEq: // Eq uses UnsignedType instead, so we translate the signed one. switch signedType { case wazeroir.SignedTypeUint32, wazeroir.SignedTypeInt32: - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32))) case wazeroir.SignedTypeUint64, wazeroir.SignedTypeInt64: - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI64)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI64))) case wazeroir.SignedTypeFloat32: - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeF32)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeF32))) case wazeroir.SignedTypeFloat64: - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeF64)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeF64))) } case wazeroir.OperationKindNe: // Ne uses UnsignedType, so we translate the signed one. 
switch signedType { case wazeroir.SignedTypeUint32, wazeroir.SignedTypeInt32: - err = compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32)) + err = compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32))) case wazeroir.SignedTypeUint64, wazeroir.SignedTypeInt64: - err = compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI64)) + err = compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI64))) case wazeroir.SignedTypeFloat32: - err = compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeF32)) + err = compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeF32))) case wazeroir.SignedTypeFloat64: - err = compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeF64)) + err = compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeF64))) } case wazeroir.OperationKindEqz: // Eqz uses UnsignedInt, so we translate the signed one. switch signedType { case wazeroir.SignedTypeUint32, wazeroir.SignedTypeInt32: - err = compiler.compileEqz(wazeroir.NewOperationEqz(wazeroir.UnsignedInt32)) + err = compiler.compileEqz(operationPtr(wazeroir.NewOperationEqz(wazeroir.UnsignedInt32))) case wazeroir.SignedTypeUint64, wazeroir.SignedTypeInt64: - err = compiler.compileEqz(wazeroir.NewOperationEqz(wazeroir.UnsignedInt64)) + err = compiler.compileEqz(operationPtr(wazeroir.NewOperationEqz(wazeroir.UnsignedInt64))) } } require.NoError(t, err) @@ -802,19 +802,19 @@ func TestCompiler_compile_Clz_Ctz_Popcnt(t *testing.T) { require.NoError(t, err) if is32bit { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(v))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(v)))) } else { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) } require.NoError(t, err) switch kind { case wazeroir.OperationKindClz: - err = 
compiler.compileClz(wazeroir.NewOperationClz(tp)) + err = compiler.compileClz(operationPtr(wazeroir.NewOperationClz(tp))) case wazeroir.OperationKindCtz: - err = compiler.compileCtz(wazeroir.NewOperationCtz(tp)) + err = compiler.compileCtz(operationPtr(wazeroir.NewOperationCtz(tp))) case wazeroir.OperationKindPopcnt: - err = compiler.compilePopcnt(wazeroir.NewOperationPopcnt(tp)) + err = compiler.compilePopcnt(operationPtr(wazeroir.NewOperationPopcnt(tp))) } require.NoError(t, err) @@ -868,7 +868,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "min-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileMin(wazeroir.NewOperationMin(wazeroir.Float32)) + err := compiler.compileMin(operationPtr(wazeroir.NewOperationMin(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -885,7 +885,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "min-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileMin(wazeroir.NewOperationMin(wazeroir.Float64)) + err := compiler.compileMin(operationPtr(wazeroir.NewOperationMin(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -902,7 +902,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "max-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileMax(wazeroir.NewOperationMax(wazeroir.Float32)) + err := compiler.compileMax(operationPtr(wazeroir.NewOperationMax(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -919,7 +919,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "max-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileMax(wazeroir.NewOperationMax(wazeroir.Float64)) + err := 
compiler.compileMax(operationPtr(wazeroir.NewOperationMax(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -936,7 +936,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "copysign-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileCopysign(wazeroir.NewOperationCopysign(wazeroir.Float32)) + err := compiler.compileCopysign(operationPtr(wazeroir.NewOperationCopysign(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -953,7 +953,7 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { name: "copysign-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileCopysign(wazeroir.NewOperationCopysign(wazeroir.Float64)) + err := compiler.compileCopysign(operationPtr(wazeroir.NewOperationCopysign(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, x1, x2 float64, raw uint64) { @@ -1014,14 +1014,14 @@ func TestCompiler_compile_Min_Max_Copysign(t *testing.T) { // Setup the target values. 
if tc.is32bit { - err := compiler.compileConstF32(wazeroir.NewOperationConstF32(float32(x1))) + err := compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(float32(x1)))) require.NoError(t, err) - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(float32(x2))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(float32(x2)))) require.NoError(t, err) } else { - err := compiler.compileConstF64(wazeroir.NewOperationConstF64(x1)) + err := compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(x1))) require.NoError(t, err) - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(x2)) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(x2))) require.NoError(t, err) } @@ -1064,7 +1064,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "abs-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileAbs(wazeroir.NewOperationAbs(wazeroir.Float32)) + err := compiler.compileAbs(operationPtr(wazeroir.NewOperationAbs(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1081,7 +1081,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "abs-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileAbs(wazeroir.NewOperationAbs(wazeroir.Float64)) + err := compiler.compileAbs(operationPtr(wazeroir.NewOperationAbs(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1098,7 +1098,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "neg-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileNeg(wazeroir.NewOperationNeg(wazeroir.Float32)) + err := compiler.compileNeg(operationPtr(wazeroir.NewOperationNeg(wazeroir.Float32))) 
require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1115,7 +1115,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "neg-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileNeg(wazeroir.NewOperationNeg(wazeroir.Float64)) + err := compiler.compileNeg(operationPtr(wazeroir.NewOperationNeg(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1132,7 +1132,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "ceil-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileCeil(wazeroir.NewOperationCeil(wazeroir.Float32)) + err := compiler.compileCeil(operationPtr(wazeroir.NewOperationCeil(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1149,7 +1149,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "ceil-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileCeil(wazeroir.NewOperationCeil(wazeroir.Float64)) + err := compiler.compileCeil(operationPtr(wazeroir.NewOperationCeil(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1166,7 +1166,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "floor-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileFloor(wazeroir.NewOperationFloor(wazeroir.Float32)) + err := compiler.compileFloor(operationPtr(wazeroir.NewOperationFloor(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1183,7 +1183,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "floor-64-bit", is32bit: false, setupFunc: 
func(t *testing.T, compiler compilerImpl) { - err := compiler.compileFloor(wazeroir.NewOperationFloor(wazeroir.Float64)) + err := compiler.compileFloor(operationPtr(wazeroir.NewOperationFloor(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1200,7 +1200,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "trunc-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileTrunc(wazeroir.NewOperationTrunc(wazeroir.Float32)) + err := compiler.compileTrunc(operationPtr(wazeroir.NewOperationTrunc(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1217,7 +1217,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "trunc-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileTrunc(wazeroir.NewOperationTrunc(wazeroir.Float64)) + err := compiler.compileTrunc(operationPtr(wazeroir.NewOperationTrunc(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1234,7 +1234,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "nearest-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileNearest(wazeroir.NewOperationNearest(wazeroir.Float32)) + err := compiler.compileNearest(operationPtr(wazeroir.NewOperationNearest(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1251,7 +1251,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "nearest-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileNearest(wazeroir.NewOperationNearest(wazeroir.Float64)) + err := 
compiler.compileNearest(operationPtr(wazeroir.NewOperationNearest(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1268,7 +1268,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "sqrt-32-bit", is32bit: true, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileSqrt(wazeroir.NewOperationSqrt(wazeroir.Float32)) + err := compiler.compileSqrt(operationPtr(wazeroir.NewOperationSqrt(wazeroir.Float32))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1285,7 +1285,7 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { name: "sqrt-64-bit", is32bit: false, setupFunc: func(t *testing.T, compiler compilerImpl) { - err := compiler.compileSqrt(wazeroir.NewOperationSqrt(wazeroir.Float64)) + err := compiler.compileSqrt(operationPtr(wazeroir.NewOperationSqrt(wazeroir.Float64))) require.NoError(t, err) }, verifyFunc: func(t *testing.T, v float64, raw uint64) { @@ -1321,10 +1321,10 @@ func TestCompiler_compile_Abs_Neg_Ceil_Floor_Trunc_Nearest_Sqrt(t *testing.T) { require.NoError(t, err) if tc.is32bit { - err := compiler.compileConstF32(wazeroir.NewOperationConstF32(float32(v))) + err := compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(float32(v)))) require.NoError(t, err) } else { - err := compiler.compileConstF64(wazeroir.NewOperationConstF64(v)) + err := compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(v))) require.NoError(t, err) } @@ -1446,13 +1446,13 @@ func TestCompiler_compile_Div_Rem(t *testing.T) { require.NoError(t, err) env.stack()[loc.stackPointer] = uint64(v) case wazeroir.SignedTypeInt32: - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(int32(v)))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(int32(v))))) case wazeroir.SignedTypeInt64, wazeroir.SignedTypeUint64: - err = 
compiler.compileConstI64(wazeroir.NewOperationConstI64(v)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(v))) case wazeroir.SignedTypeFloat32: - err = compiler.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v)))) + err = compiler.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(uint32(v))))) case wazeroir.SignedTypeFloat64: - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(v))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(v)))) } require.NoError(t, err) } @@ -1462,17 +1462,17 @@ func TestCompiler_compile_Div_Rem(t *testing.T) { switch kind { case wazeroir.OperationKindDiv: - err = compiler.compileDiv(wazeroir.NewOperationDiv(signedType)) + err = compiler.compileDiv(operationPtr(wazeroir.NewOperationDiv(signedType))) case wazeroir.OperationKindRem: switch signedType { case wazeroir.SignedTypeInt32: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedInt32)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedInt32))) case wazeroir.SignedTypeInt64: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedInt64)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedInt64))) case wazeroir.SignedTypeUint32: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedUint32)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedUint32))) case wazeroir.SignedTypeUint64: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedUint64)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedUint64))) case wazeroir.SignedTypeFloat32: // Rem undefined for float32. 
return diff --git a/internal/engine/compiler/compiler_post1_0_test.go b/internal/engine/compiler/compiler_post1_0_test.go index 40515a61..296a1eda 100644 --- a/internal/engine/compiler/compiler_post1_0_test.go +++ b/internal/engine/compiler/compiler_post1_0_test.go @@ -49,7 +49,7 @@ func TestCompiler_compileSignExtend(t *testing.T) { require.NoError(t, err) // Setup the promote target. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(uint32(tc.in))) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(uint32(tc.in)))) require.NoError(t, err) if tc.fromKind == from8 { @@ -120,7 +120,7 @@ func TestCompiler_compileSignExtend(t *testing.T) { require.NoError(t, err) // Setup the promote target. - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(uint64(tc.in))) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(uint64(tc.in)))) require.NoError(t, err) if tc.fromKind == from8 { @@ -201,11 +201,11 @@ func TestCompiler_compileMemoryCopy(t *testing.T) { require.NoError(t, err) // Compile operands. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.destOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.sourceOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.sourceOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.size)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.size))) require.NoError(t, err) err = compiler.compileMemoryCopy() @@ -285,11 +285,11 @@ func TestCompiler_compileMemoryFill(t *testing.T) { require.NoError(t, err) // Compile operands. 
- err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.destOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.v)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.v))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.size)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.size))) require.NoError(t, err) err = compiler.compileMemoryFill() @@ -349,9 +349,7 @@ func TestCompiler_compileDataDrop(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileDataDrop(wazeroir.OperationDataDrop{ - DataIndex: uint32(i), - }) + err = compiler.compileDataDrop(operationPtr(wazeroir.NewOperationDataDrop(uint32(i)))) require.NoError(t, err) // Generate the code under test. @@ -426,16 +424,14 @@ func TestCompiler_compileMemoryInit(t *testing.T) { require.NoError(t, err) // Compile operands. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.destOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.sourceOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.sourceOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.copySize)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.copySize))) require.NoError(t, err) - err = compiler.compileMemoryInit(wazeroir.OperationMemoryInit{ - DataIndex: tc.dataIndex, - }) + err = compiler.compileMemoryInit(operationPtr(wazeroir.NewOperationMemoryInit(tc.dataIndex))) require.NoError(t, err) // Generate the code under test. 
@@ -490,9 +486,7 @@ func TestCompiler_compileElemDrop(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileElemDrop(wazeroir.OperationElemDrop{ - ElemIndex: uint32(i), - }) + err = compiler.compileElemDrop(operationPtr(wazeroir.NewOperationElemDrop(uint32(i)))) require.NoError(t, err) // Generate the code under test. @@ -563,14 +557,14 @@ func TestCompiler_compileTableCopy(t *testing.T) { require.NoError(t, err) // Compile operands. - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.destOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.sourceOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.sourceOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.size)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.size))) require.NoError(t, err) - err = compiler.compileTableCopy(wazeroir.OperationTableCopy{}) + err = compiler.compileTableCopy(operationPtr(wazeroir.NewOperationTableCopy(0, 0))) require.NoError(t, err) // Generate the code under test. @@ -655,16 +649,14 @@ func TestCompiler_compileTableInit(t *testing.T) { require.NoError(t, err) // Compile operands. 
- err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.destOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.destOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.sourceOffset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.sourceOffset))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.copySize)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.copySize))) require.NoError(t, err) - err = compiler.compileTableInit(wazeroir.OperationTableInit{ - ElemIndex: tc.elemIndex, - }) + err = compiler.compileTableInit(operationPtr(wazeroir.NewOperationTableInit(tc.elemIndex, 0))) require.NoError(t, err) // Setup the table. @@ -781,13 +773,13 @@ func TestCompiler_compileTableSet(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(uint64(tc.in))) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(uint64(tc.in)))) require.NoError(t, err) - err = compiler.compileTableSet(wazeroir.OperationTableSet{TableIndex: tc.tableIndex}) + err = compiler.compileTableSet(operationPtr(wazeroir.NewOperationTableSet(tc.tableIndex))) require.NoError(t, err) // Generate the code under test. 
@@ -913,10 +905,10 @@ func TestCompiler_compileTableGet(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileTableGet(wazeroir.OperationTableGet{TableIndex: tc.tableIndex}) + err = compiler.compileTableGet(operationPtr(wazeroir.NewOperationTableGet(tc.tableIndex))) require.NoError(t, err) // Generate the code under test. @@ -960,7 +952,7 @@ func TestCompiler_compileRefFunc(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileRefFunc(wazeroir.OperationRefFunc{FunctionIndex: uint32(i)}) + err = compiler.compileRefFunc(operationPtr(wazeroir.NewOperationRefFunc(uint32(i)))) require.NoError(t, err) // Generate the code under test. diff --git a/internal/engine/compiler/compiler_stack_test.go b/internal/engine/compiler/compiler_stack_test.go index 2db31654..fd4c574b 100644 --- a/internal/engine/compiler/compiler_stack_test.go +++ b/internal/engine/compiler/compiler_stack_test.go @@ -43,9 +43,9 @@ func TestCompiler_releaseRegisterToStack(t *testing.T) { compiler.setRuntimeValueLocationStack(s) if tc.isFloat { - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(val))) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(val)))) } else { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(val)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(val))) } require.NoError(t, err) // Release the register allocated value to the memory stack so that we can see the value after exiting. @@ -118,14 +118,14 @@ func TestCompiler_compileLoadValueOnStackToRegister(t *testing.T) { // To verify the behavior, increment the value on the register. 
if tc.isFloat { - err = compiler.compileConstF64(wazeroir.NewOperationConstF64(1)) + err = compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(1))) require.NoError(t, err) - err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeF64)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeF64))) require.NoError(t, err) } else { - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(1)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(1))) require.NoError(t, err) - err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI64)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI64))) require.NoError(t, err) } @@ -160,7 +160,7 @@ func TestCompiler_compileLoadValueOnStackToRegister(t *testing.T) { func TestCompiler_compilePick_v128(t *testing.T) { const pickTargetLo, pickTargetHi uint64 = 12345, 6789 - op := wazeroir.NewOperationPick(2, true) + op := operationPtr(wazeroir.NewOperationPick(2, true)) tests := []struct { name string isPickTargetOnRegister bool @@ -179,9 +179,7 @@ func TestCompiler_compilePick_v128(t *testing.T) { // Set up the stack before picking. 
if tc.isPickTargetOnRegister { - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: pickTargetLo, Hi: pickTargetHi, - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(pickTargetLo, pickTargetHi))) require.NoError(t, err) } else { lo := compiler.runtimeValueLocationStack().pushRuntimeValueLocationOnStack() // lo @@ -231,7 +229,7 @@ func TestCompiler_compilePick_v128(t *testing.T) { func TestCompiler_compilePick(t *testing.T) { const pickTargetValue uint64 = 12345 - op := wazeroir.NewOperationPick(1, false) + op := operationPtr(wazeroir.NewOperationPick(1, false)) tests := []struct { name string pickTargetSetupFunc func(compiler compilerImpl, ce *callEngine) error @@ -240,7 +238,7 @@ func TestCompiler_compilePick(t *testing.T) { { name: "float on register", pickTargetSetupFunc: func(compiler compilerImpl, _ *callEngine) error { - return compiler.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(pickTargetValue))) + return compiler.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(pickTargetValue)))) }, isPickTargetFloat: true, isPickTargetOnRegister: true, @@ -248,7 +246,7 @@ func TestCompiler_compilePick(t *testing.T) { { name: "int on register", pickTargetSetupFunc: func(compiler compilerImpl, _ *callEngine) error { - return compiler.compileConstI64(wazeroir.NewOperationConstI64(pickTargetValue)) + return compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(pickTargetValue))) }, isPickTargetFloat: false, isPickTargetOnRegister: true, @@ -342,7 +340,7 @@ func TestCompiler_compileDrop(t *testing.T) { } requireRuntimeLocationStackPointerEqual(t, uint64(liveNum), compiler) - err = compiler.compileDrop(wazeroir.OperationDrop{Depth: nil}) + err = compiler.compileDrop(operationPtr(wazeroir.NewOperationDrop(nil))) require.NoError(t, err) // After the nil range drop, the stack must remain the same. 
@@ -372,7 +370,7 @@ func TestCompiler_compileDrop(t *testing.T) { const expectedTopLiveValue = 100 for i := 0; i < liveNum+dropTargetNum; i++ { if i == liveNum-1 { - err := compiler.compileConstI64(wazeroir.NewOperationConstI64(expectedTopLiveValue)) + err := compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(expectedTopLiveValue))) require.NoError(t, err) } else { compiler.runtimeValueLocationStack().pushRuntimeValueLocationOnStack() @@ -380,7 +378,7 @@ func TestCompiler_compileDrop(t *testing.T) { } requireRuntimeLocationStackPointerEqual(t, uint64(liveNum+dropTargetNum), compiler) - err = compiler.compileDrop(wazeroir.OperationDrop{Depth: r}) + err = compiler.compileDrop(operationPtr(wazeroir.NewOperationDrop(r))) require.NoError(t, err) // After the drop operation, the stack contains only live contents. @@ -430,12 +428,12 @@ func TestCompiler_compileDrop(t *testing.T) { // Place the top value. const expectedTopLiveValue = 100 - err = compiler.compileConstI64(wazeroir.NewOperationConstI64(expectedTopLiveValue)) + err = compiler.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(expectedTopLiveValue))) require.NoError(t, err) require.Equal(t, uint64(total), compiler.runtimeValueLocationStack().sp) - err = compiler.compileDrop(wazeroir.OperationDrop{Depth: r}) + err = compiler.compileDrop(operationPtr(wazeroir.NewOperationDrop(r))) require.NoError(t, err) // After the drop operation, the stack contains only live contents. 
@@ -567,20 +565,20 @@ func TestCompiler_compileSelect(t *testing.T) { err = compiler.compileEnsureOnRegister(c) require.NoError(t, err) } else if tc.condValueOnCondRegister { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) if tc.selectX1 { - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32))) } else { - err = compiler.compileNe(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32)) + err = compiler.compileNe(operationPtr(wazeroir.NewOperationNe(wazeroir.UnsignedTypeI32))) } require.NoError(t, err) } // Now emit code for select. - err = compiler.compileSelect(wazeroir.NewOperationSelect(false)) + err = compiler.compileSelect(operationPtr(wazeroir.NewOperationSelect(false))) require.NoError(t, err) // x1 should be top of the stack. @@ -629,7 +627,7 @@ func TestCompiler_compileSwap_v128(t *testing.T) { require.NoError(t, err) if tc.x1OnRegister { - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: x1Lo, Hi: x1Hi}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(x1Lo, x1Hi))) require.NoError(t, err) } else { lo := compiler.runtimeValueLocationStack().pushRuntimeValueLocationOnStack() // lo @@ -643,7 +641,7 @@ func TestCompiler_compileSwap_v128(t *testing.T) { _ = compiler.runtimeValueLocationStack().pushRuntimeValueLocationOnStack() // Dummy value! 
if tc.x2OnRegister { - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: x2Lo, Hi: x2Hi}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(x2Lo, x2Hi))) require.NoError(t, err) } else { lo := compiler.runtimeValueLocationStack().pushRuntimeValueLocationOnStack() // lo @@ -655,7 +653,7 @@ func TestCompiler_compileSwap_v128(t *testing.T) { } // Swap x1 and x2. - err = compiler.compileSet(wazeroir.NewOperationSet(4, true)) + err = compiler.compileSet(operationPtr(wazeroir.NewOperationSet(4, true))) require.NoError(t, err) require.NoError(t, compiler.compileReturnFunction()) @@ -720,17 +718,17 @@ func TestCompiler_compileSet(t *testing.T) { x1.valueType = runtimeValueTypeI32 env.stack()[x1.stackPointer] = uint64(x1Value) } else { - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) - err = compiler.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32)) + err = compiler.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32))) require.NoError(t, err) x1Value = 1 } // Set x2 into the x1. 
- err = compiler.compileSet(wazeroir.NewOperationSet(2, false)) + err = compiler.compileSet(operationPtr(wazeroir.NewOperationSet(2, false))) require.NoError(t, err) require.NoError(t, compiler.compileReturnFunction()) diff --git a/internal/engine/compiler/compiler_test.go b/internal/engine/compiler/compiler_test.go index 41751a44..89725c82 100644 --- a/internal/engine/compiler/compiler_test.go +++ b/internal/engine/compiler/compiler_test.go @@ -225,7 +225,7 @@ func (j *compilerEnv) requireNewCompiler(t *testing.T, fn func() compiler, ir *w if ir == nil { ir = &wazeroir.CompilationResult{ - LabelCallers: map[wazeroir.LabelID]uint32{}, + LabelCallers: map[wazeroir.Label]uint32{}, Signature: &wasm.FunctionType{}, } } @@ -286,3 +286,7 @@ func TestCompileI32WrapFromI64(t *testing.T) { require.NoError(t, err) require.Equal(t, runtimeValueTypeI32, loc.valueType) } + +func operationPtr(operation wazeroir.UnionOperation) *wazeroir.UnionOperation { + return &operation +} diff --git a/internal/engine/compiler/compiler_vec_test.go b/internal/engine/compiler/compiler_vec_test.go index 418b601c..2cb53c2e 100644 --- a/internal/engine/compiler/compiler_vec_test.go +++ b/internal/engine/compiler/compiler_vec_test.go @@ -71,19 +71,13 @@ func TestCompiler_compileV128Add(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), 
binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Add(wazeroir.OperationV128Add{Shape: tc.shape}) + err = compiler.compileV128Add(operationPtr(wazeroir.NewOperationV128Add(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -168,19 +162,13 @@ func TestCompiler_compileV128Sub(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Sub(wazeroir.OperationV128Sub{Shape: tc.shape}) + err = compiler.compileV128Sub(operationPtr(wazeroir.NewOperationV128Sub(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -550,12 +538,10 @@ func TestCompiler_compileV128Load(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileV128Load(wazeroir.OperationV128Load{ - Type: tc.loadType, Arg: wazeroir.MemoryArg{}, - }) + err = compiler.compileV128Load(operationPtr(wazeroir.NewOperationV128Load(tc.loadType, wazeroir.MemoryArg{}))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), 
compiler) @@ -752,18 +738,14 @@ func TestCompiler_compileV128LoadLane(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: originalVecLo, - Hi: originalVecHi, - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(originalVecLo, originalVecHi))) require.NoError(t, err) - err = compiler.compileV128LoadLane(wazeroir.OperationV128LoadLane{ - LaneIndex: tc.laneIndex, LaneSize: tc.laneSize, Arg: wazeroir.MemoryArg{}, - }) + err = compiler.compileV128LoadLane( + operationPtr(wazeroir.NewOperationV128LoadLane(tc.laneIndex, tc.laneSize, wazeroir.MemoryArg{}))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -811,13 +793,13 @@ func TestCompiler_compileV128Store(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: ^uint64(0), Hi: ^uint64(0)}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(^uint64(0), ^uint64(0)))) require.NoError(t, err) - err = compiler.compileV128Store(wazeroir.OperationV128Store{Arg: wazeroir.MemoryArg{}}) + err = compiler.compileV128Store(operationPtr(wazeroir.NewOperationV128Store(wazeroir.MemoryArg{}))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(0), compiler) @@ -950,18 +932,13 @@ func TestCompiler_compileV128StoreLane(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.offset)) + err = 
compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.offset))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(vecBytes[:8]), - Hi: binary.LittleEndian.Uint64(vecBytes[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(vecBytes[:8]), binary.LittleEndian.Uint64(vecBytes[8:])))) require.NoError(t, err) - err = compiler.compileV128StoreLane(wazeroir.OperationV128StoreLane{ - LaneIndex: tc.laneIndex, LaneSize: tc.laneSize, Arg: wazeroir.MemoryArg{}, - }) + err = compiler.compileV128StoreLane(operationPtr(wazeroir.NewOperationV128StoreLane(tc.laneIndex, tc.laneSize, wazeroir.MemoryArg{}))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(0), compiler) @@ -1122,17 +1099,10 @@ func TestCompiler_compileV128ExtractLane(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.vecBytes[:8]), - Hi: binary.LittleEndian.Uint64(tc.vecBytes[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.vecBytes[:8]), binary.LittleEndian.Uint64(tc.vecBytes[8:])))) require.NoError(t, err) - err = compiler.compileV128ExtractLane(wazeroir.OperationV128ExtractLane{ - LaneIndex: tc.laneIndex, - Signed: tc.signed, - Shape: tc.shape, - }) + err = compiler.compileV128ExtractLane(operationPtr(wazeroir.NewOperationV128ExtractLane(tc.laneIndex, tc.signed, tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(1), compiler) @@ -1182,7 +1152,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI8x16, laneIndex: 5, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xff)) + err := 
c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xff))) require.NoError(t, err) }, exp: [16]byte{5: 0xff}, @@ -1192,7 +1162,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI8x16, laneIndex: 5, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xff << 8)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xff << 8))) require.NoError(t, err) }, exp: [16]byte{}, @@ -1202,7 +1172,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI8x16, laneIndex: 5, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xff)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xff))) require.NoError(t, err) }, exp: [16]byte{5: 0xff}, @@ -1212,7 +1182,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI16x8, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xee_ff)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xee_ff))) require.NoError(t, err) }, exp: [16]byte{0: 0xff, 1: 0xee}, @@ -1222,7 +1192,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI16x8, laneIndex: 3, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xaa_00)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xaa_00))) require.NoError(t, err) }, exp: [16]byte{7: 0xaa}, @@ -1232,7 +1202,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI16x8, laneIndex: 3, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xaa_bb << 16)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xaa_bb << 16))) require.NoError(t, err) }, exp: 
[16]byte{}, @@ -1242,7 +1212,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI32x4, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xaa_bb_cc_dd)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xaa_bb_cc_dd))) require.NoError(t, err) }, exp: [16]byte{0: 0xdd, 1: 0xcc, 2: 0xbb, 3: 0xaa}, @@ -1252,7 +1222,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI32x4, laneIndex: 3, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xaa_bb_cc_dd)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xaa_bb_cc_dd))) require.NoError(t, err) }, exp: [16]byte{12: 0xdd, 13: 0xcc, 14: 0xbb, 15: 0xaa}, @@ -1262,7 +1232,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI64x2, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI64(wazeroir.NewOperationConstI64(0xaa_bb_cc_dd_01_02_03_04)) + err := c.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(0xaa_bb_cc_dd_01_02_03_04))) require.NoError(t, err) }, exp: [16]byte{0: 0x04, 1: 0x03, 2: 0x02, 3: 0x01, 4: 0xdd, 5: 0xcc, 6: 0xbb, 7: 0xaa}, @@ -1272,7 +1242,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeI64x2, laneIndex: 1, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI64(wazeroir.NewOperationConstI64(0xaa_bb_cc_dd_01_02_03_04)) + err := c.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(0xaa_bb_cc_dd_01_02_03_04))) require.NoError(t, err) }, exp: [16]byte{8: 0x04, 9: 0x03, 10: 0x02, 11: 0x01, 12: 0xdd, 13: 0xcc, 14: 0xbb, 15: 0xaa}, @@ -1282,7 +1252,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF32x4, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := 
c.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd))) + err := c.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd)))) require.NoError(t, err) }, exp: [16]byte{0: 0xdd, 1: 0xcc, 2: 0xbb, 3: 0xaa}, @@ -1292,7 +1262,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF32x4, laneIndex: 1, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd))) + err := c.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd)))) require.NoError(t, err) }, exp: [16]byte{4: 0xdd, 5: 0xcc, 6: 0xbb, 7: 0xaa}, @@ -1302,7 +1272,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF32x4, laneIndex: 2, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd))) + err := c.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd)))) require.NoError(t, err) }, exp: [16]byte{8: 0xdd, 9: 0xcc, 10: 0xbb, 11: 0xaa}, @@ -1312,7 +1282,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF32x4, laneIndex: 3, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd))) + err := c.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(0xaa_bb_cc_dd)))) require.NoError(t, err) }, exp: [16]byte{12: 0xdd, 13: 0xcc, 14: 0xbb, 15: 0xaa}, @@ -1322,7 +1292,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF64x2, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(0xaa_bb_cc_dd_01_02_03_04))) + err := 
c.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(0xaa_bb_cc_dd_01_02_03_04)))) require.NoError(t, err) }, exp: [16]byte{0: 0x04, 1: 0x03, 2: 0x02, 3: 0x01, 4: 0xdd, 5: 0xcc, 6: 0xbb, 7: 0xaa}, @@ -1332,7 +1302,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF64x2, laneIndex: 1, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(0xaa_bb_cc_dd_01_02_03_04))) + err := c.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(0xaa_bb_cc_dd_01_02_03_04)))) require.NoError(t, err) }, exp: [16]byte{8: 0x04, 9: 0x03, 10: 0x02, 11: 0x01, 12: 0xdd, 13: 0xcc, 14: 0xbb, 15: 0xaa}, @@ -1342,7 +1312,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF64x2, laneIndex: 0, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(0.0))) + err := c.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(0.0)))) require.NoError(t, err) }, lo: math.Float64bits(1.0), @@ -1354,7 +1324,7 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { shape: wazeroir.ShapeF64x2, laneIndex: 1, originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(0.0))) + err := c.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(0.0)))) require.NoError(t, err) }, lo: math.Float64bits(1.0), @@ -1373,15 +1343,12 @@ func TestCompiler_compileV128ReplaceLane(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: tc.lo, Hi: tc.hi}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(tc.lo, tc.hi))) require.NoError(t, err) tc.originValueSetupFn(t, compiler) - err = 
compiler.compileV128ReplaceLane(wazeroir.OperationV128ReplaceLane{ - LaneIndex: tc.laneIndex, - Shape: tc.shape, - }) + err = compiler.compileV128ReplaceLane(operationPtr(wazeroir.NewOperationV128ReplaceLane(tc.laneIndex, tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -1414,7 +1381,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "i8x16", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0x1)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0x1))) require.NoError(t, err) }, shape: wazeroir.ShapeI8x16, @@ -1423,7 +1390,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "i16x8", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xff_11)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xff_11))) require.NoError(t, err) }, shape: wazeroir.ShapeI16x8, @@ -1432,7 +1399,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "i32x4", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI32(wazeroir.NewOperationConstI32(0xff_11_ee_22)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0xff_11_ee_22))) require.NoError(t, err) }, shape: wazeroir.ShapeI32x4, @@ -1441,7 +1408,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "i64x2", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstI64(wazeroir.NewOperationConstI64(0xff_00_ee_00_11_00_22_00)) + err := c.compileConstI64(operationPtr(wazeroir.NewOperationConstI64(0xff_00_ee_00_11_00_22_00))) require.NoError(t, err) }, shape: wazeroir.ShapeI64x2, @@ -1450,7 +1417,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "f32x4", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := 
c.compileConstF32(wazeroir.NewOperationConstF32(math.Float32frombits(0xff_11_ee_22))) + err := c.compileConstF32(operationPtr(wazeroir.NewOperationConstF32(math.Float32frombits(0xff_11_ee_22)))) require.NoError(t, err) }, shape: wazeroir.ShapeF32x4, @@ -1459,7 +1426,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { { name: "f64x2", originValueSetupFn: func(t *testing.T, c compilerImpl) { - err := c.compileConstF64(wazeroir.NewOperationConstF64(math.Float64frombits(0xff_00_ee_00_11_00_22_00))) + err := c.compileConstF64(operationPtr(wazeroir.NewOperationConstF64(math.Float64frombits(0xff_00_ee_00_11_00_22_00)))) require.NoError(t, err) }, shape: wazeroir.ShapeF64x2, @@ -1479,7 +1446,7 @@ func TestCompiler_compileV128Splat(t *testing.T) { tc.originValueSetupFn(t, compiler) - err = compiler.compileV128Splat(wazeroir.OperationV128Splat{Shape: tc.shape}) + err = compiler.compileV128Splat(operationPtr(wazeroir.NewOperationV128Splat(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -1524,10 +1491,10 @@ func TestCompiler_compileV128AnyTrue(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: tc.lo, Hi: tc.hi}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(tc.lo, tc.hi))) require.NoError(t, err) - err = compiler.compileV128AnyTrue(wazeroir.OperationV128AnyTrue{}) + err = compiler.compileV128AnyTrue(operationPtr(wazeroir.NewOperationV128AnyTrue())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(1), compiler) @@ -1686,10 +1653,10 @@ func TestCompiler_compileV128AllTrue(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{Lo: tc.lo, Hi: tc.hi}) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(tc.lo, tc.hi))) require.NoError(t, err) - err = 
compiler.compileV128AllTrue(wazeroir.OperationV128AllTrue{Shape: tc.shape}) + err = compiler.compileV128AllTrue(operationPtr(wazeroir.NewOperationV128AllTrue(tc.shape))) require.NoError(t, err) require.Equal(t, 0, len(compiler.runtimeValueLocationStack().usedRegisters.list())) @@ -1782,19 +1749,13 @@ func TestCompiler_compileV128Swizzle(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.baseVec[:8]), - Hi: binary.LittleEndian.Uint64(tc.baseVec[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.baseVec[:8]), binary.LittleEndian.Uint64(tc.baseVec[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.indexVec[:8]), - Hi: binary.LittleEndian.Uint64(tc.indexVec[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.indexVec[:8]), binary.LittleEndian.Uint64(tc.indexVec[8:])))) require.NoError(t, err) - err = compiler.compileV128Swizzle(wazeroir.OperationV128Swizzle{}) + err = compiler.compileV128Swizzle(operationPtr(wazeroir.NewOperationV128Swizzle())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -1821,12 +1782,13 @@ func TestCompiler_compileV128Swizzle(t *testing.T) { func TestCompiler_compileV128Shuffle(t *testing.T) { tests := []struct { - name string - lanes, w, v, exp [16]byte + name string + lanes []uint64 + w, v, exp [16]byte }{ { name: "v only", - lanes: [16]byte{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0}, + lanes: []uint64{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0}, v: [16]byte{0: 0xa, 1: 0xb, 10: 0xc}, w: [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, exp: [16]byte{ @@ -1838,7 +1800,7 @@ func 
TestCompiler_compileV128Shuffle(t *testing.T) { }, { name: "w only", - lanes: [16]byte{17, 17, 17, 17, 16, 16, 16, 16, 26, 26, 26, 26, 16, 16, 16, 16}, + lanes: []uint64{17, 17, 17, 17, 16, 16, 16, 16, 26, 26, 26, 26, 16, 16, 16, 16}, v: [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, w: [16]byte{0: 0xa, 1: 0xb, 10: 0xc}, exp: [16]byte{ @@ -1850,7 +1812,7 @@ func TestCompiler_compileV128Shuffle(t *testing.T) { }, { name: "mix", - lanes: [16]byte{0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31}, + lanes: []uint64{0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31}, v: [16]byte{ 0x1, 0xff, 0x2, 0xff, 0x3, 0xff, 0x4, 0xff, 0x5, 0xff, 0x6, 0xff, 0x7, 0xff, 0x8, 0xff, @@ -1866,7 +1828,7 @@ func TestCompiler_compileV128Shuffle(t *testing.T) { }, { name: "mix", - lanes: [16]byte{0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31}, + lanes: []uint64{0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31}, v: [16]byte{ 0x1, 0xff, 0x2, 0xff, 0x3, 0xff, 0x4, 0xff, 0x5, 0xff, 0x6, 0xff, 0x7, 0xff, 0x8, 0xff, @@ -1892,19 +1854,13 @@ func TestCompiler_compileV128Shuffle(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.w[:8]), - Hi: binary.LittleEndian.Uint64(tc.w[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.w[:8]), binary.LittleEndian.Uint64(tc.w[8:])))) require.NoError(t, err) - err = compiler.compileV128Shuffle(wazeroir.OperationV128Shuffle{Lanes: tc.lanes}) + err = 
compiler.compileV128Shuffle(operationPtr(wazeroir.NewOperationV128Shuffle(tc.lanes))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -2033,13 +1989,10 @@ func TestCompiler_compileV128Bitmask(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128BitMask(wazeroir.OperationV128BitMask{Shape: tc.shape}) + err = compiler.compileV128BitMask(operationPtr(wazeroir.NewOperationV128BitMask(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(1), compiler) @@ -2070,13 +2023,10 @@ func TestCompiler_compileV128_Not(t *testing.T) { var originalLo, originalHi uint64 = 0xffff_0000_ffff_0000, 0x0000_ffff_0000_ffff - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: originalLo, - Hi: originalHi, - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(originalLo, originalHi))) require.NoError(t, err) - err = compiler.compileV128Not(wazeroir.OperationV128Not{}) + err = compiler.compileV128Not(operationPtr(wazeroir.NewOperationV128Not())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -2282,27 +2232,21 @@ func TestCompiler_compileV128_And_Or_Xor_AndNot(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) 
require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) switch tc.op { case wazeroir.OperationKindV128And: - err = compiler.compileV128And(wazeroir.OperationV128And{}) + err = compiler.compileV128And(operationPtr(wazeroir.NewOperationV128And())) case wazeroir.OperationKindV128Or: - err = compiler.compileV128Or(wazeroir.OperationV128Or{}) + err = compiler.compileV128Or(operationPtr(wazeroir.NewOperationV128Or())) case wazeroir.OperationKindV128Xor: - err = compiler.compileV128Xor(wazeroir.OperationV128Xor{}) + err = compiler.compileV128Xor(operationPtr(wazeroir.NewOperationV128Xor())) case wazeroir.OperationKindV128AndNot: - err = compiler.compileV128AndNot(wazeroir.OperationV128AndNot{}) + err = compiler.compileV128AndNot(operationPtr(wazeroir.NewOperationV128AndNot())) } require.NoError(t, err) @@ -2379,25 +2323,16 @@ func TestCompiler_compileV128Bitselect(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - 
Lo: binary.LittleEndian.Uint64(tc.selector[:8]), - Hi: binary.LittleEndian.Uint64(tc.selector[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.selector[:8]), binary.LittleEndian.Uint64(tc.selector[8:])))) require.NoError(t, err) - err = compiler.compileV128Bitselect(wazeroir.OperationV128Bitselect{}) + err = compiler.compileV128Bitselect(operationPtr(wazeroir.NewOperationV128Bitselect())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -2673,16 +2608,13 @@ func TestCompiler_compileV128Shl(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x[:8]), - Hi: binary.LittleEndian.Uint64(tc.x[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x[:8]), binary.LittleEndian.Uint64(tc.x[8:])))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.s)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.s))) require.NoError(t, err) - err = compiler.compileV128Shl(wazeroir.OperationV128Shl{Shape: tc.shape}) + err = compiler.compileV128Shl(operationPtr(wazeroir.NewOperationV128Shl(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -2949,16 +2881,13 @@ func TestCompiler_compileV128Shr(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x[:8]), - Hi: binary.LittleEndian.Uint64(tc.x[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x[:8]), binary.LittleEndian.Uint64(tc.x[8:])))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(tc.s)) + err = 
compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(tc.s))) require.NoError(t, err) - err = compiler.compileV128Shr(wazeroir.OperationV128Shr{Shape: tc.shape, Signed: tc.signed}) + err = compiler.compileV128Shr(operationPtr(wazeroir.NewOperationV128Shr(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3381,19 +3310,13 @@ func TestCompiler_compileV128Cmp(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Cmp(wazeroir.OperationV128Cmp{Type: tc.cmpType}) + err = compiler.compileV128Cmp(operationPtr(wazeroir.NewOperationV128Cmp(tc.cmpType))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3462,19 +3385,13 @@ func TestCompiler_compileV128AvgrU(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: 
binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128AvgrU(wazeroir.OperationV128AvgrU{Shape: tc.shape}) + err = compiler.compileV128AvgrU(operationPtr(wazeroir.NewOperationV128AvgrU(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3534,13 +3451,10 @@ func TestCompiler_compileV128Sqrt(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Sqrt(wazeroir.OperationV128Sqrt{Shape: tc.shape}) + err = compiler.compileV128Sqrt(operationPtr(wazeroir.NewOperationV128Sqrt(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3618,19 +3532,13 @@ func TestCompiler_compileV128Mul(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), 
binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Mul(wazeroir.OperationV128Mul{Shape: tc.shape}) + err = compiler.compileV128Mul(operationPtr(wazeroir.NewOperationV128Mul(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3721,13 +3629,10 @@ func TestCompiler_compileV128Neg(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Neg(wazeroir.OperationV128Neg{Shape: tc.shape}) + err = compiler.compileV128Neg(operationPtr(wazeroir.NewOperationV128Neg(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3818,13 +3723,10 @@ func TestCompiler_compileV128Abs(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Abs(wazeroir.OperationV128Abs{Shape: tc.shape}) + err = compiler.compileV128Abs(operationPtr(wazeroir.NewOperationV128Abs(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -3888,19 +3790,13 @@ func TestCompiler_compileV128Div(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: 
binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Div(wazeroir.OperationV128Div{Shape: tc.shape}) + err = compiler.compileV128Div(operationPtr(wazeroir.NewOperationV128Div(tc.shape))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -4080,19 +3976,13 @@ func TestCompiler_compileV128Min(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Min(wazeroir.OperationV128Min{Shape: tc.shape, Signed: tc.signed}) + err = compiler.compileV128Min(operationPtr(wazeroir.NewOperationV128Min(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -4307,19 +4197,13 @@ func TestCompiler_compileV128Max(t *testing.T) { err := 
compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Max(wazeroir.OperationV128Max{Shape: tc.shape, Signed: tc.signed}) + err = compiler.compileV128Max(operationPtr(wazeroir.NewOperationV128Max(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -4448,19 +4332,13 @@ func TestCompiler_compileV128AddSat(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128AddSat(wazeroir.OperationV128AddSat{Shape: tc.shape, Signed: tc.signed}) + err = 
compiler.compileV128AddSat(operationPtr(wazeroir.NewOperationV128AddSat(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -4560,19 +4438,13 @@ func TestCompiler_compileV128SubSat(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128SubSat(wazeroir.OperationV128SubSat{Shape: tc.shape, Signed: tc.signed}) + err = compiler.compileV128SubSat(operationPtr(wazeroir.NewOperationV128SubSat(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -4636,13 +4508,10 @@ func TestCompiler_compileV128Popcnt(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Popcnt(wazeroir.OperationV128Popcnt{}) + err = compiler.compileV128Popcnt(operationPtr(wazeroir.NewOperationV128Popcnt(0))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) 
@@ -4806,22 +4675,19 @@ func TestCompiler_compileV128Round(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) is32bit := tc.shape == wazeroir.ShapeF32x4 switch tc.kind { case wazeroir.OperationKindV128Ceil: - err = compiler.compileV128Ceil(wazeroir.OperationV128Ceil{Shape: tc.shape}) + err = compiler.compileV128Ceil(operationPtr(wazeroir.NewOperationV128Ceil(tc.shape))) case wazeroir.OperationKindV128Floor: - err = compiler.compileV128Floor(wazeroir.OperationV128Floor{Shape: tc.shape}) + err = compiler.compileV128Floor(operationPtr(wazeroir.NewOperationV128Floor(tc.shape))) case wazeroir.OperationKindV128Trunc: - err = compiler.compileV128Trunc(wazeroir.OperationV128Trunc{Shape: tc.shape}) + err = compiler.compileV128Trunc(operationPtr(wazeroir.NewOperationV128Trunc(tc.shape))) case wazeroir.OperationKindV128Nearest: - err = compiler.compileV128Nearest(wazeroir.OperationV128Nearest{Shape: tc.shape}) + err = compiler.compileV128Nearest(operationPtr(wazeroir.NewOperationV128Nearest(tc.shape))) } require.NoError(t, err) @@ -5096,24 +4962,18 @@ func TestCompiler_compileV128_Pmax_Pmin(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: 
binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) is32bit := tc.shape == wazeroir.ShapeF32x4 switch tc.kind { case wazeroir.OperationKindV128Pmin: - err = compiler.compileV128Pmin(wazeroir.OperationV128Pmin{Shape: tc.shape}) + err = compiler.compileV128Pmin(operationPtr(wazeroir.NewOperationV128Pmin(tc.shape))) case wazeroir.OperationKindV128Pmax: - err = compiler.compileV128Pmax(wazeroir.OperationV128Pmax{Shape: tc.shape}) + err = compiler.compileV128Pmax(operationPtr(wazeroir.NewOperationV128Pmax(tc.shape))) } require.NoError(t, err) @@ -5795,21 +5655,13 @@ func TestCompiler_compileV128ExtMul(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128ExtMul(wazeroir.OperationV128ExtMul{ - OriginShape: tc.shape, Signed: tc.signed, UseLow: tc.useLow, - }) + err = compiler.compileV128ExtMul(operationPtr(wazeroir.NewOperationV128ExtMul(tc.shape, tc.signed, tc.useLow))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6274,15 +6126,10 @@ func TestCompiler_compileV128Extend(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, 
err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128Extend(wazeroir.OperationV128Extend{ - OriginShape: tc.shape, Signed: tc.signed, UseLow: tc.useLow, - }) + err = compiler.compileV128Extend(operationPtr(wazeroir.NewOperationV128Extend(tc.shape, tc.signed, tc.useLow))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6354,19 +6201,13 @@ func TestCompiler_compileV128Q15mulrSatS(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Q15mulrSatS(wazeroir.OperationV128Q15mulrSatS{}) + err = compiler.compileV128Q15mulrSatS(operationPtr(wazeroir.NewOperationV128Q15mulrSatS())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6433,13 +6274,10 @@ func TestCompiler_compileFloatPromote(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - 
Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128FloatPromote(wazeroir.OperationV128FloatPromote{}) + err = compiler.compileV128FloatPromote(operationPtr(wazeroir.NewOperationV128FloatPromote())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6517,13 +6355,10 @@ func TestCompiler_compileV128FloatDemote(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128FloatDemote(wazeroir.OperationV128FloatDemote{}) + err = compiler.compileV128FloatDemote(operationPtr(wazeroir.NewOperationV128FloatDemote())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6725,15 +6560,10 @@ func TestCompiler_compileV128ExtAddPairwise(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128ExtAddPairwise(wazeroir.OperationV128ExtAddPairwise{ - OriginShape: tc.shape, Signed: tc.signed, - }) + err = compiler.compileV128ExtAddPairwise(operationPtr(wazeroir.NewOperationV128ExtAddPairwise(tc.shape, tc.signed))) require.NoError(t, err) 
requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -6967,21 +6797,13 @@ func TestCompiler_compileV128Narrow(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Narrow(wazeroir.OperationV128Narrow{ - OriginShape: tc.shape, Signed: tc.signed, - }) + err = compiler.compileV128Narrow(operationPtr(wazeroir.NewOperationV128Narrow(tc.shape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -7109,16 +6931,10 @@ func TestCompiler_compileV128FConvertFromI(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128FConvertFromI(wazeroir.OperationV128FConvertFromI{ - DestinationShape: tc.destShape, - Signed: tc.signed, - }) + err = compiler.compileV128FConvertFromI(operationPtr(wazeroir.NewOperationV128FConvertFromI(tc.destShape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), 
compiler) @@ -7178,19 +6994,13 @@ func TestCompiler_compileV128Dot(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x2[:8]), - Hi: binary.LittleEndian.Uint64(tc.x2[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x2[:8]), binary.LittleEndian.Uint64(tc.x2[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.x1[:8]), - Hi: binary.LittleEndian.Uint64(tc.x1[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.x1[:8]), binary.LittleEndian.Uint64(tc.x1[8:])))) require.NoError(t, err) - err = compiler.compileV128Dot(wazeroir.OperationV128Dot{}) + err = compiler.compileV128Dot(operationPtr(wazeroir.NewOperationV128Dot())) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -7332,16 +7142,10 @@ func TestCompiler_compileV128ITruncSatFromF(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) - err = compiler.compileV128ITruncSatFromF(wazeroir.OperationV128ITruncSatFromF{ - OriginShape: tc.originShape, - Signed: tc.signed, - }) + err = compiler.compileV128ITruncSatFromF(operationPtr(wazeroir.NewOperationV128ITruncSatFromF(tc.originShape, tc.signed))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) @@ -7379,22 +7183,16 @@ func TestCompiler_compileSelect_v128(t *testing.T) { err := compiler.compilePreamble() 
require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: x1Lo, - Hi: x1Hi, - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(x1Lo, x1Hi))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: x2Lo, - Hi: x2Hi, - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(x2Lo, x2Hi))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(selector)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(selector))) require.NoError(t, err) - err = compiler.compileSelect(wazeroir.NewOperationSelect(true)) + err = compiler.compileSelect(operationPtr(wazeroir.NewOperationSelect(true))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, uint64(2), compiler) diff --git a/internal/engine/compiler/engine.go b/internal/engine/compiler/engine.go index 27245221..a3e9abf2 100644 --- a/internal/engine/compiler/engine.go +++ b/internal/engine/compiler/engine.go @@ -1069,7 +1069,8 @@ func compileWasmFunction(cmp compiler, ir *wazeroir.CompilationResult) (*code, e } var skip bool - for i, op := range ir.Operations { + for i := range ir.Operations { + op := &ir.Operations[i] if needSourceOffsets { // If this compilation requires source offsets for DWARF based back trace, // we emit a NOP node at the beginning of each IR operation to get the @@ -1080,315 +1081,301 @@ func compileWasmFunction(cmp compiler, ir *wazeroir.CompilationResult) (*code, e // Compiler determines whether skip the entire label. // For example, if the label doesn't have any caller, // we don't need to generate native code at all as we never reach the region. 
- if op.Kind() == wazeroir.OperationKindLabel { - skip = cmp.compileLabel(op.(wazeroir.OperationLabel)) + if op.Kind == wazeroir.OperationKindLabel { + skip = cmp.compileLabel(op) } if skip { continue } if false { - fmt.Printf("compiling op=%s: %s\n", op.Kind(), cmp) + fmt.Printf("compiling op=%s: %s\n", op.Kind, cmp) } var err error - switch o := op.(type) { - case wazeroir.OperationLabel: - // Label op is already handled ^^. - case wazeroir.OperationBr: - err = cmp.compileBr(o) - case wazeroir.OperationBrIf: - err = cmp.compileBrIf(o) - case wazeroir.OperationBrTable: - err = cmp.compileBrTable(o) - case wazeroir.OperationDrop: - err = cmp.compileDrop(o) - case wazeroir.OperationITruncFromF: - err = cmp.compileITruncFromF(o) - case wazeroir.OperationFConvertFromI: - err = cmp.compileFConvertFromI(o) - case wazeroir.OperationExtend: - err = cmp.compileExtend(o) - case wazeroir.OperationDataDrop: - err = cmp.compileDataDrop(o) - case wazeroir.OperationMemoryInit: - err = cmp.compileMemoryInit(o) - case wazeroir.OperationTableInit: - err = cmp.compileTableInit(o) - case wazeroir.OperationTableCopy: - err = cmp.compileTableCopy(o) - case wazeroir.OperationElemDrop: - err = cmp.compileElemDrop(o) - case wazeroir.OperationRefFunc: - err = cmp.compileRefFunc(o) - case wazeroir.OperationTableGet: - err = cmp.compileTableGet(o) - case wazeroir.OperationTableSet: - err = cmp.compileTableSet(o) - case wazeroir.OperationTableGrow: - err = cmp.compileTableGrow(o) - case wazeroir.OperationTableSize: - err = cmp.compileTableSize(o) - case wazeroir.OperationTableFill: - err = cmp.compileTableFill(o) - case wazeroir.OperationV128Const: - err = cmp.compileV128Const(o) - case wazeroir.OperationV128Add: - err = cmp.compileV128Add(o) - case wazeroir.OperationV128Sub: - err = cmp.compileV128Sub(o) - case wazeroir.OperationV128Load: - err = cmp.compileV128Load(o) - case wazeroir.OperationV128LoadLane: - err = cmp.compileV128LoadLane(o) - case wazeroir.OperationV128Store: - err = 
cmp.compileV128Store(o) - case wazeroir.OperationV128StoreLane: - err = cmp.compileV128StoreLane(o) - case wazeroir.OperationV128ExtractLane: - err = cmp.compileV128ExtractLane(o) - case wazeroir.OperationV128ReplaceLane: - err = cmp.compileV128ReplaceLane(o) - case wazeroir.OperationV128Splat: - err = cmp.compileV128Splat(o) - case wazeroir.OperationV128Shuffle: - err = cmp.compileV128Shuffle(o) - case wazeroir.OperationV128Swizzle: - err = cmp.compileV128Swizzle(o) - case wazeroir.OperationV128AnyTrue: - err = cmp.compileV128AnyTrue(o) - case wazeroir.OperationV128AllTrue: - err = cmp.compileV128AllTrue(o) - case wazeroir.OperationV128BitMask: - err = cmp.compileV128BitMask(o) - case wazeroir.OperationV128And: - err = cmp.compileV128And(o) - case wazeroir.OperationV128Not: - err = cmp.compileV128Not(o) - case wazeroir.OperationV128Or: - err = cmp.compileV128Or(o) - case wazeroir.OperationV128Xor: - err = cmp.compileV128Xor(o) - case wazeroir.OperationV128Bitselect: - err = cmp.compileV128Bitselect(o) - case wazeroir.OperationV128AndNot: - err = cmp.compileV128AndNot(o) - case wazeroir.OperationV128Shr: - err = cmp.compileV128Shr(o) - case wazeroir.OperationV128Shl: - err = cmp.compileV128Shl(o) - case wazeroir.OperationV128Cmp: - err = cmp.compileV128Cmp(o) - case wazeroir.OperationV128AddSat: - err = cmp.compileV128AddSat(o) - case wazeroir.OperationV128SubSat: - err = cmp.compileV128SubSat(o) - case wazeroir.OperationV128Mul: - err = cmp.compileV128Mul(o) - case wazeroir.OperationV128Div: - err = cmp.compileV128Div(o) - case wazeroir.OperationV128Neg: - err = cmp.compileV128Neg(o) - case wazeroir.OperationV128Sqrt: - err = cmp.compileV128Sqrt(o) - case wazeroir.OperationV128Abs: - err = cmp.compileV128Abs(o) - case wazeroir.OperationV128Popcnt: - err = cmp.compileV128Popcnt(o) - case wazeroir.OperationV128Min: - err = cmp.compileV128Min(o) - case wazeroir.OperationV128Max: - err = cmp.compileV128Max(o) - case wazeroir.OperationV128AvgrU: - err = 
cmp.compileV128AvgrU(o) - case wazeroir.OperationV128Pmin: - err = cmp.compileV128Pmin(o) - case wazeroir.OperationV128Pmax: - err = cmp.compileV128Pmax(o) - case wazeroir.OperationV128Ceil: - err = cmp.compileV128Ceil(o) - case wazeroir.OperationV128Floor: - err = cmp.compileV128Floor(o) - case wazeroir.OperationV128Trunc: - err = cmp.compileV128Trunc(o) - case wazeroir.OperationV128Nearest: - err = cmp.compileV128Nearest(o) - case wazeroir.OperationV128Extend: - err = cmp.compileV128Extend(o) - case wazeroir.OperationV128ExtMul: - err = cmp.compileV128ExtMul(o) - case wazeroir.OperationV128Q15mulrSatS: - err = cmp.compileV128Q15mulrSatS(o) - case wazeroir.OperationV128ExtAddPairwise: - err = cmp.compileV128ExtAddPairwise(o) - case wazeroir.OperationV128FloatPromote: - err = cmp.compileV128FloatPromote(o) - case wazeroir.OperationV128FloatDemote: - err = cmp.compileV128FloatDemote(o) - case wazeroir.OperationV128FConvertFromI: - err = cmp.compileV128FConvertFromI(o) - case wazeroir.OperationV128Dot: - err = cmp.compileV128Dot(o) - case wazeroir.OperationV128Narrow: - err = cmp.compileV128Narrow(o) - case wazeroir.OperationV128ITruncSatFromF: - err = cmp.compileV128ITruncSatFromF(o) - case wazeroir.UnionOperation: - switch op.Kind() { - case wazeroir.OperationKindUnreachable: - err = cmp.compileUnreachable() - case wazeroir.OperationKindCall: - err = cmp.compileCall(o) - case wazeroir.OperationKindCallIndirect: - err = cmp.compileCallIndirect(o) - - case wazeroir.OperationKindSelect: - err = cmp.compileSelect(o) - case wazeroir.OperationKindPick: - err = cmp.compilePick(o) - case wazeroir.OperationKindSet: - err = cmp.compileSet(o) - case wazeroir.OperationKindGlobalGet: - err = cmp.compileGlobalGet(o) - case wazeroir.OperationKindGlobalSet: - err = cmp.compileGlobalSet(o) - case wazeroir.OperationKindLoad: - err = cmp.compileLoad(o) - case wazeroir.OperationKindLoad8: - err = cmp.compileLoad8(o) - case wazeroir.OperationKindLoad16: - err = cmp.compileLoad16(o) - 
case wazeroir.OperationKindLoad32: - err = cmp.compileLoad32(o) - case wazeroir.OperationKindStore: - err = cmp.compileStore(o) - case wazeroir.OperationKindStore8: - err = cmp.compileStore8(o) - case wazeroir.OperationKindStore16: - err = cmp.compileStore16(o) - case wazeroir.OperationKindStore32: - err = cmp.compileStore32(o) - - case wazeroir.OperationKindMemorySize: - err = cmp.compileMemorySize() - case wazeroir.OperationKindMemoryGrow: - err = cmp.compileMemoryGrow() - case wazeroir.OperationKindConstI32: - err = cmp.compileConstI32(o) - case wazeroir.OperationKindConstI64: - err = cmp.compileConstI64(o) - case wazeroir.OperationKindConstF32: - err = cmp.compileConstF32(o) - case wazeroir.OperationKindConstF64: - err = cmp.compileConstF64(o) - case wazeroir.OperationKindEq: - err = cmp.compileEq(o) - case wazeroir.OperationKindNe: - err = cmp.compileNe(o) - case wazeroir.OperationKindEqz: - err = cmp.compileEqz(o) - case wazeroir.OperationKindLt: - err = cmp.compileLt(o) - case wazeroir.OperationKindGt: - err = cmp.compileGt(o) - case wazeroir.OperationKindLe: - err = cmp.compileLe(o) - case wazeroir.OperationKindGe: - err = cmp.compileGe(o) - case wazeroir.OperationKindAdd: - err = cmp.compileAdd(o) - case wazeroir.OperationKindSub: - err = cmp.compileSub(o) - case wazeroir.OperationKindMul: - err = cmp.compileMul(o) - case wazeroir.OperationKindClz: - err = cmp.compileClz(o) - case wazeroir.OperationKindCtz: - err = cmp.compileCtz(o) - case wazeroir.OperationKindPopcnt: - err = cmp.compilePopcnt(o) - case wazeroir.OperationKindDiv: - err = cmp.compileDiv(o) - case wazeroir.OperationKindRem: - err = cmp.compileRem(o) - case wazeroir.OperationKindAnd: - err = cmp.compileAnd(o) - case wazeroir.OperationKindOr: - err = cmp.compileOr(o) - case wazeroir.OperationKindXor: - err = cmp.compileXor(o) - case wazeroir.OperationKindShl: - err = cmp.compileShl(o) - case wazeroir.OperationKindShr: - err = cmp.compileShr(o) - case wazeroir.OperationKindRotl: - err = 
cmp.compileRotl(o) - case wazeroir.OperationKindRotr: - err = cmp.compileRotr(o) - case wazeroir.OperationKindAbs: - err = cmp.compileAbs(o) - case wazeroir.OperationKindNeg: - err = cmp.compileNeg(o) - case wazeroir.OperationKindCeil: - err = cmp.compileCeil(o) - case wazeroir.OperationKindFloor: - err = cmp.compileFloor(o) - case wazeroir.OperationKindTrunc: - err = cmp.compileTrunc(o) - case wazeroir.OperationKindNearest: - err = cmp.compileNearest(o) - case wazeroir.OperationKindSqrt: - err = cmp.compileSqrt(o) - case wazeroir.OperationKindMin: - err = cmp.compileMin(o) - case wazeroir.OperationKindMax: - err = cmp.compileMax(o) - case wazeroir.OperationKindCopysign: - err = cmp.compileCopysign(o) - - case wazeroir.OperationKindI32WrapFromI64: - err = cmp.compileI32WrapFromI64() - - case wazeroir.OperationKindF32DemoteFromF64: - err = cmp.compileF32DemoteFromF64() - case wazeroir.OperationKindF64PromoteFromF32: - err = cmp.compileF64PromoteFromF32() - case wazeroir.OperationKindI32ReinterpretFromF32: - err = cmp.compileI32ReinterpretFromF32() - case wazeroir.OperationKindI64ReinterpretFromF64: - err = cmp.compileI64ReinterpretFromF64() - case wazeroir.OperationKindF32ReinterpretFromI32: - err = cmp.compileF32ReinterpretFromI32() - case wazeroir.OperationKindF64ReinterpretFromI64: - err = cmp.compileF64ReinterpretFromI64() - - // OperationExtend - case wazeroir.OperationKindSignExtend32From8: - err = cmp.compileSignExtend32From8() - case wazeroir.OperationKindSignExtend32From16: - err = cmp.compileSignExtend32From16() - case wazeroir.OperationKindSignExtend64From8: - err = cmp.compileSignExtend64From8() - case wazeroir.OperationKindSignExtend64From16: - err = cmp.compileSignExtend64From16() - case wazeroir.OperationKindSignExtend64From32: - err = cmp.compileSignExtend64From32() - - // Drop..Init - // - case wazeroir.OperationKindMemoryCopy: - err = cmp.compileMemoryCopy() - case wazeroir.OperationKindMemoryFill: - err = cmp.compileMemoryFill() - - // ... 
- case wazeroir.OperationKindBuiltinFunctionCheckExitCode: - err = cmp.compileBuiltinFunctionCheckExitCode() - } + switch op.Kind { + case wazeroir.OperationKindUnreachable: + err = cmp.compileUnreachable() + case wazeroir.OperationKindLabel: + // label op is already handled ^^. + case wazeroir.OperationKindBr: + err = cmp.compileBr(op) + case wazeroir.OperationKindBrIf: + err = cmp.compileBrIf(op) + case wazeroir.OperationKindBrTable: + err = cmp.compileBrTable(op) + case wazeroir.OperationKindCall: + err = cmp.compileCall(op) + case wazeroir.OperationKindCallIndirect: + err = cmp.compileCallIndirect(op) + case wazeroir.OperationKindDrop: + err = cmp.compileDrop(op) + case wazeroir.OperationKindSelect: + err = cmp.compileSelect(op) + case wazeroir.OperationKindPick: + err = cmp.compilePick(op) + case wazeroir.OperationKindSet: + err = cmp.compileSet(op) + case wazeroir.OperationKindGlobalGet: + err = cmp.compileGlobalGet(op) + case wazeroir.OperationKindGlobalSet: + err = cmp.compileGlobalSet(op) + case wazeroir.OperationKindLoad: + err = cmp.compileLoad(op) + case wazeroir.OperationKindLoad8: + err = cmp.compileLoad8(op) + case wazeroir.OperationKindLoad16: + err = cmp.compileLoad16(op) + case wazeroir.OperationKindLoad32: + err = cmp.compileLoad32(op) + case wazeroir.OperationKindStore: + err = cmp.compileStore(op) + case wazeroir.OperationKindStore8: + err = cmp.compileStore8(op) + case wazeroir.OperationKindStore16: + err = cmp.compileStore16(op) + case wazeroir.OperationKindStore32: + err = cmp.compileStore32(op) + case wazeroir.OperationKindMemorySize: + err = cmp.compileMemorySize() + case wazeroir.OperationKindMemoryGrow: + err = cmp.compileMemoryGrow() + case wazeroir.OperationKindConstI32: + err = cmp.compileConstI32(op) + case wazeroir.OperationKindConstI64: + err = cmp.compileConstI64(op) + case wazeroir.OperationKindConstF32: + err = cmp.compileConstF32(op) + case wazeroir.OperationKindConstF64: + err = cmp.compileConstF64(op) + case 
wazeroir.OperationKindEq: + err = cmp.compileEq(op) + case wazeroir.OperationKindNe: + err = cmp.compileNe(op) + case wazeroir.OperationKindEqz: + err = cmp.compileEqz(op) + case wazeroir.OperationKindLt: + err = cmp.compileLt(op) + case wazeroir.OperationKindGt: + err = cmp.compileGt(op) + case wazeroir.OperationKindLe: + err = cmp.compileLe(op) + case wazeroir.OperationKindGe: + err = cmp.compileGe(op) + case wazeroir.OperationKindAdd: + err = cmp.compileAdd(op) + case wazeroir.OperationKindSub: + err = cmp.compileSub(op) + case wazeroir.OperationKindMul: + err = cmp.compileMul(op) + case wazeroir.OperationKindClz: + err = cmp.compileClz(op) + case wazeroir.OperationKindCtz: + err = cmp.compileCtz(op) + case wazeroir.OperationKindPopcnt: + err = cmp.compilePopcnt(op) + case wazeroir.OperationKindDiv: + err = cmp.compileDiv(op) + case wazeroir.OperationKindRem: + err = cmp.compileRem(op) + case wazeroir.OperationKindAnd: + err = cmp.compileAnd(op) + case wazeroir.OperationKindOr: + err = cmp.compileOr(op) + case wazeroir.OperationKindXor: + err = cmp.compileXor(op) + case wazeroir.OperationKindShl: + err = cmp.compileShl(op) + case wazeroir.OperationKindShr: + err = cmp.compileShr(op) + case wazeroir.OperationKindRotl: + err = cmp.compileRotl(op) + case wazeroir.OperationKindRotr: + err = cmp.compileRotr(op) + case wazeroir.OperationKindAbs: + err = cmp.compileAbs(op) + case wazeroir.OperationKindNeg: + err = cmp.compileNeg(op) + case wazeroir.OperationKindCeil: + err = cmp.compileCeil(op) + case wazeroir.OperationKindFloor: + err = cmp.compileFloor(op) + case wazeroir.OperationKindTrunc: + err = cmp.compileTrunc(op) + case wazeroir.OperationKindNearest: + err = cmp.compileNearest(op) + case wazeroir.OperationKindSqrt: + err = cmp.compileSqrt(op) + case wazeroir.OperationKindMin: + err = cmp.compileMin(op) + case wazeroir.OperationKindMax: + err = cmp.compileMax(op) + case wazeroir.OperationKindCopysign: + err = cmp.compileCopysign(op) + case 
wazeroir.OperationKindI32WrapFromI64: + err = cmp.compileI32WrapFromI64() + case wazeroir.OperationKindITruncFromF: + err = cmp.compileITruncFromF(op) + case wazeroir.OperationKindFConvertFromI: + err = cmp.compileFConvertFromI(op) + case wazeroir.OperationKindF32DemoteFromF64: + err = cmp.compileF32DemoteFromF64() + case wazeroir.OperationKindF64PromoteFromF32: + err = cmp.compileF64PromoteFromF32() + case wazeroir.OperationKindI32ReinterpretFromF32: + err = cmp.compileI32ReinterpretFromF32() + case wazeroir.OperationKindI64ReinterpretFromF64: + err = cmp.compileI64ReinterpretFromF64() + case wazeroir.OperationKindF32ReinterpretFromI32: + err = cmp.compileF32ReinterpretFromI32() + case wazeroir.OperationKindF64ReinterpretFromI64: + err = cmp.compileF64ReinterpretFromI64() + case wazeroir.OperationKindExtend: + err = cmp.compileExtend(op) + case wazeroir.OperationKindSignExtend32From8: + err = cmp.compileSignExtend32From8() + case wazeroir.OperationKindSignExtend32From16: + err = cmp.compileSignExtend32From16() + case wazeroir.OperationKindSignExtend64From8: + err = cmp.compileSignExtend64From8() + case wazeroir.OperationKindSignExtend64From16: + err = cmp.compileSignExtend64From16() + case wazeroir.OperationKindSignExtend64From32: + err = cmp.compileSignExtend64From32() + case wazeroir.OperationKindMemoryInit: + err = cmp.compileMemoryInit(op) + case wazeroir.OperationKindDataDrop: + err = cmp.compileDataDrop(op) + case wazeroir.OperationKindMemoryCopy: + err = cmp.compileMemoryCopy() + case wazeroir.OperationKindMemoryFill: + err = cmp.compileMemoryFill() + case wazeroir.OperationKindTableInit: + err = cmp.compileTableInit(op) + case wazeroir.OperationKindElemDrop: + err = cmp.compileElemDrop(op) + case wazeroir.OperationKindTableCopy: + err = cmp.compileTableCopy(op) + case wazeroir.OperationKindRefFunc: + err = cmp.compileRefFunc(op) + case wazeroir.OperationKindTableGet: + err = cmp.compileTableGet(op) + case wazeroir.OperationKindTableSet: + err = 
cmp.compileTableSet(op) + case wazeroir.OperationKindTableGrow: + err = cmp.compileTableGrow(op) + case wazeroir.OperationKindTableSize: + err = cmp.compileTableSize(op) + case wazeroir.OperationKindTableFill: + err = cmp.compileTableFill(op) + case wazeroir.OperationKindV128Const: + err = cmp.compileV128Const(op) + case wazeroir.OperationKindV128Add: + err = cmp.compileV128Add(op) + case wazeroir.OperationKindV128Sub: + err = cmp.compileV128Sub(op) + case wazeroir.OperationKindV128Load: + err = cmp.compileV128Load(op) + case wazeroir.OperationKindV128LoadLane: + err = cmp.compileV128LoadLane(op) + case wazeroir.OperationKindV128Store: + err = cmp.compileV128Store(op) + case wazeroir.OperationKindV128StoreLane: + err = cmp.compileV128StoreLane(op) + case wazeroir.OperationKindV128ExtractLane: + err = cmp.compileV128ExtractLane(op) + case wazeroir.OperationKindV128ReplaceLane: + err = cmp.compileV128ReplaceLane(op) + case wazeroir.OperationKindV128Splat: + err = cmp.compileV128Splat(op) + case wazeroir.OperationKindV128Shuffle: + err = cmp.compileV128Shuffle(op) + case wazeroir.OperationKindV128Swizzle: + err = cmp.compileV128Swizzle(op) + case wazeroir.OperationKindV128AnyTrue: + err = cmp.compileV128AnyTrue(op) + case wazeroir.OperationKindV128AllTrue: + err = cmp.compileV128AllTrue(op) + case wazeroir.OperationKindV128BitMask: + err = cmp.compileV128BitMask(op) + case wazeroir.OperationKindV128And: + err = cmp.compileV128And(op) + case wazeroir.OperationKindV128Not: + err = cmp.compileV128Not(op) + case wazeroir.OperationKindV128Or: + err = cmp.compileV128Or(op) + case wazeroir.OperationKindV128Xor: + err = cmp.compileV128Xor(op) + case wazeroir.OperationKindV128Bitselect: + err = cmp.compileV128Bitselect(op) + case wazeroir.OperationKindV128AndNot: + err = cmp.compileV128AndNot(op) + case wazeroir.OperationKindV128Shl: + err = cmp.compileV128Shl(op) + case wazeroir.OperationKindV128Shr: + err = cmp.compileV128Shr(op) + case wazeroir.OperationKindV128Cmp: + err = 
cmp.compileV128Cmp(op) + case wazeroir.OperationKindV128AddSat: + err = cmp.compileV128AddSat(op) + case wazeroir.OperationKindV128SubSat: + err = cmp.compileV128SubSat(op) + case wazeroir.OperationKindV128Mul: + err = cmp.compileV128Mul(op) + case wazeroir.OperationKindV128Div: + err = cmp.compileV128Div(op) + case wazeroir.OperationKindV128Neg: + err = cmp.compileV128Neg(op) + case wazeroir.OperationKindV128Sqrt: + err = cmp.compileV128Sqrt(op) + case wazeroir.OperationKindV128Abs: + err = cmp.compileV128Abs(op) + case wazeroir.OperationKindV128Popcnt: + err = cmp.compileV128Popcnt(op) + case wazeroir.OperationKindV128Min: + err = cmp.compileV128Min(op) + case wazeroir.OperationKindV128Max: + err = cmp.compileV128Max(op) + case wazeroir.OperationKindV128AvgrU: + err = cmp.compileV128AvgrU(op) + case wazeroir.OperationKindV128Pmin: + err = cmp.compileV128Pmin(op) + case wazeroir.OperationKindV128Pmax: + err = cmp.compileV128Pmax(op) + case wazeroir.OperationKindV128Ceil: + err = cmp.compileV128Ceil(op) + case wazeroir.OperationKindV128Floor: + err = cmp.compileV128Floor(op) + case wazeroir.OperationKindV128Trunc: + err = cmp.compileV128Trunc(op) + case wazeroir.OperationKindV128Nearest: + err = cmp.compileV128Nearest(op) + case wazeroir.OperationKindV128Extend: + err = cmp.compileV128Extend(op) + case wazeroir.OperationKindV128ExtMul: + err = cmp.compileV128ExtMul(op) + case wazeroir.OperationKindV128Q15mulrSatS: + err = cmp.compileV128Q15mulrSatS(op) + case wazeroir.OperationKindV128ExtAddPairwise: + err = cmp.compileV128ExtAddPairwise(op) + case wazeroir.OperationKindV128FloatPromote: + err = cmp.compileV128FloatPromote(op) + case wazeroir.OperationKindV128FloatDemote: + err = cmp.compileV128FloatDemote(op) + case wazeroir.OperationKindV128FConvertFromI: + err = cmp.compileV128FConvertFromI(op) + case wazeroir.OperationKindV128Dot: + err = cmp.compileV128Dot(op) + case wazeroir.OperationKindV128Narrow: + err = cmp.compileV128Narrow(op) + case 
wazeroir.OperationKindV128ITruncSatFromF: + err = cmp.compileV128ITruncSatFromF(op) + case wazeroir.OperationKindBuiltinFunctionCheckExitCode: + err = cmp.compileBuiltinFunctionCheckExitCode() default: err = errors.New("unsupported") } if err != nil { - return nil, fmt.Errorf("operation %s: %w", op.Kind().String(), err) + return nil, fmt.Errorf("operation %s: %w", op.Kind.String(), err) } } diff --git a/internal/engine/compiler/impl_amd64.go b/internal/engine/compiler/impl_amd64.go index a7057825..6041e743 100644 --- a/internal/engine/compiler/impl_amd64.go +++ b/internal/engine/compiler/impl_amd64.go @@ -161,10 +161,10 @@ type amd64LabelInfo struct { initialStack runtimeValueLocationStack } -func (c *amd64Compiler) label(labelID wazeroir.LabelID) *amd64LabelInfo { - kind := labelID.Kind() +func (c *amd64Compiler) label(label wazeroir.Label) *amd64LabelInfo { + kind := label.Kind() frames := c.labels[kind] - frameID := labelID.FrameID() + frameID := label.FrameID() // If the frameID is not allocated yet, expand the slice by twice of the diff, // so that we could reduce the allocation in the subsequent compilation. if diff := frameID - len(frames) + 1; diff > 0 { @@ -267,7 +267,7 @@ func (c *amd64Compiler) compileUnreachable() error { } // compileSet implements compiler.compileSet for the amd64 architecture. -func (c *amd64Compiler) compileSet(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileSet(o *wazeroir.UnionOperation) error { depth := int(o.U1) isTargetVector := o.B3 @@ -298,7 +298,7 @@ func (c *amd64Compiler) compileSet(o wazeroir.UnionOperation) error { } // compileGlobalGet implements compiler.compileGlobalGet for the amd64 architecture. 
-func (c *amd64Compiler) compileGlobalGet(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileGlobalGet(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -365,7 +365,7 @@ func (c *amd64Compiler) compileGlobalGet(o wazeroir.UnionOperation) error { } // compileGlobalSet implements compiler.compileGlobalSet for the amd64 architecture. -func (c *amd64Compiler) compileGlobalSet(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileGlobalSet(o *wazeroir.UnionOperation) error { index := o.U1 wasmValueType := c.ir.Globals[index].ValType @@ -410,11 +410,11 @@ func (c *amd64Compiler) compileGlobalSet(o wazeroir.UnionOperation) error { } // compileBr implements compiler.compileBr for the amd64 architecture. -func (c *amd64Compiler) compileBr(o wazeroir.OperationBr) error { +func (c *amd64Compiler) compileBr(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } - return c.branchInto(o.Target) + return c.branchInto(wazeroir.Label(o.U1)) } // branchInto adds instruction necessary to jump into the given branch target. @@ -422,8 +422,7 @@ func (c *amd64Compiler) branchInto(target wazeroir.Label) error { if target.IsReturnTarget() { return c.compileReturnFunction() } else { - labelID := target.ID() - if c.ir.LabelCallers[labelID] > 1 { + if c.ir.LabelCallers[target] > 1 { // We can only re-use register state if when there's a single call-site. // Release existing values on registers to the stack if there's multiple ones to have // the consistent value location state at the beginning of label. @@ -434,20 +433,20 @@ func (c *amd64Compiler) branchInto(target wazeroir.Label) error { // Set the initial stack of the target label, so we can start compiling the label // with the appropriate value locations. 
Note we clone the stack here as we maybe // manipulate the stack before compiler reaches the label. - targetLabel := c.label(labelID) + targetLabel := c.label(target) if !targetLabel.initialStack.initialized() { // It seems unnecessary to clone as branchInto is always the tail of the current block. // TODO: verify ^^. targetLabel.initialStack = c.locationStack.clone() } jmp := c.assembler.CompileJump(amd64.JMP) - c.assignJumpTarget(labelID, jmp) + c.assignJumpTarget(target, jmp) } return nil } // compileBrIf implements compiler.compileBrIf for the amd64 architecture. -func (c *amd64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { +func (c *amd64Compiler) compileBrIf(o *wazeroir.UnionOperation) error { cond := c.locationStack.pop() var jmpWithCond asm.Node if cond.onConditionalRegister() { @@ -501,7 +500,9 @@ func (c *amd64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { } // Make sure that the next coming label is the else jump target. - thenTarget, elseTarget := o.Then, o.Else + thenTarget := wazeroir.Label(o.Us[0]) + thenToDrop := o.Rs[0] + elseTarget := wazeroir.Label(o.Us[1]) // Here's the diagram of how we organize the instructions necessarily for brif operation. // @@ -514,13 +515,13 @@ func (c *amd64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { // Emit for else branches saved := c.locationStack c.setLocationStack(saved.clone()) - if elseTarget.Target.IsReturnTarget() { + if elseTarget.IsReturnTarget() { if err := c.compileReturnFunction(); err != nil { return err } } else { - elseLabelID := elseTarget.Target.ID() - if c.ir.LabelCallers[elseLabelID] > 1 { + elseLabel := elseTarget + if c.ir.LabelCallers[elseLabel] > 1 { // We can only re-use register state if when there's a single call-site. // Release existing values on registers to the stack if there's multiple ones to have // the consistent value location state at the beginning of label. 
@@ -531,26 +532,26 @@ func (c *amd64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { // Set the initial stack of the target label, so we can start compiling the label // with the appropriate value locations. Note we clone the stack here as we maybe // manipulate the stack before compiler reaches the label. - labelInfo := c.label(elseLabelID) + labelInfo := c.label(elseLabel) if !labelInfo.initialStack.initialized() { labelInfo.initialStack = c.locationStack } elseJmp := c.assembler.CompileJump(amd64.JMP) - c.assignJumpTarget(elseLabelID, elseJmp) + c.assignJumpTarget(elseLabel, elseJmp) } // Handle then branch. c.assembler.SetJumpTargetOnNext(jmpWithCond) c.setLocationStack(saved) - if err := compileDropRange(c, thenTarget.ToDrop); err != nil { + if err := compileDropRange(c, thenToDrop); err != nil { return err } - if thenTarget.Target.IsReturnTarget() { + if thenTarget.IsReturnTarget() { return c.compileReturnFunction() } else { - thenLabelID := thenTarget.Target.ID() - if c.ir.LabelCallers[thenLabelID] > 1 { + thenLabel := thenTarget + if c.ir.LabelCallers[thenLabel] > 1 { // We can only re-use register state if when there's a single call-site. // Release existing values on registers to the stack if there's multiple ones to have // the consistent value location state at the beginning of label. @@ -561,27 +562,31 @@ func (c *amd64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { // Set the initial stack of the target label, so we can start compiling the label // with the appropriate value locations. Note we clone the stack here as we maybe // manipulate the stack before compiler reaches the label. 
- labelInfo := c.label(thenLabelID) + labelInfo := c.label(thenLabel) if !labelInfo.initialStack.initialized() { labelInfo.initialStack = c.locationStack } thenJmp := c.assembler.CompileJump(amd64.JMP) - c.assignJumpTarget(thenLabelID, thenJmp) + c.assignJumpTarget(thenLabel, thenJmp) return nil } } // compileBrTable implements compiler.compileBrTable for the amd64 architecture. -func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { +func (c *amd64Compiler) compileBrTable(o *wazeroir.UnionOperation) error { index := c.locationStack.pop() // If the operation only consists of the default target, we branch into it and return early. - if len(o.Targets) == 0 { + if len(o.Us) == 1 { c.locationStack.releaseRegister(index) - if err := compileDropRange(c, o.Default.ToDrop); err != nil { + var r *wazeroir.InclusiveRange + if len(o.Rs) > 0 { + r = o.Rs[0] + } + if err := compileDropRange(c, r); err != nil { return err } - return c.branchInto(o.Default.Target) + return c.branchInto(wazeroir.Label(o.Us[0])) } // Otherwise, we jump into the selected branch. @@ -595,7 +600,7 @@ func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { } // First, we move the length of target list into the tmp register. - c.assembler.CompileConstToRegister(amd64.MOVQ, int64(len(o.Targets)), tmp) + c.assembler.CompileConstToRegister(amd64.MOVQ, int64(len(o.Us)-1), tmp) // Then, we compare the value with the length of targets. c.assembler.CompileRegisterToRegister(amd64.CMPL, tmp, index.register) @@ -631,7 +636,7 @@ func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { // the above example's offsetData would be [0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x8, 0x0, 0x0, 0x0]. // // Note: this is similar to how GCC implements Switch statements in C. - offsetData := asm.NewStaticConst(make([]byte, 4*(len(o.Targets)+1))) + offsetData := asm.NewStaticConst(make([]byte, 4*(len(o.Us)))) // Load the offsetData's address into tmp. 
if err = c.assembler.CompileStaticConstToRegister(amd64.LEAQ, offsetData, tmp); err != nil { @@ -658,7 +663,7 @@ func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { c.locationStack.markRegisterUnused(index.register) // [Emit the code for each targets and default branch] - labelInitialInstructions := make([]asm.Node, len(o.Targets)+1) + labelInitialInstructions := make([]asm.Node, len(o.Us)) saved := c.locationStack for i := range labelInitialInstructions { // Emit the initial instruction of each target. @@ -667,23 +672,30 @@ func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { labelInitialInstructions[i] = c.assembler.CompileStandAlone(amd64.NOP) var locationStack runtimeValueLocationStack - var target *wazeroir.BranchTargetDrop - if i < len(o.Targets) { - target = o.Targets[i] + var targetToDrop *wazeroir.InclusiveRange + var targetLabel wazeroir.Label + if i < len(o.Us)-1 { + targetLabel = wazeroir.Label(o.Us[i+1]) + if len(o.Rs) > i+1 { + targetToDrop = o.Rs[i+1] + } // Clone the location stack so the branch-specific code doesn't // affect others. locationStack = saved.clone() } else { - target = o.Default + targetLabel = wazeroir.Label(o.Us[0]) + if len(o.Rs) > 0 { + targetToDrop = o.Rs[0] + } // If this is the default branch, we use the original one // as this is the last code in this block. 
locationStack = saved } c.setLocationStack(locationStack) - if err := compileDropRange(c, target.ToDrop); err != nil { + if err := compileDropRange(c, targetToDrop); err != nil { return err } - if err := c.branchInto(target.Target); err != nil { + if err := c.branchInto(targetLabel); err != nil { return err } } @@ -692,8 +704,8 @@ func (c *amd64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { return nil } -func (c *amd64Compiler) assignJumpTarget(labelID wazeroir.LabelID, jmpInstruction asm.Node) { - jmpTargetLabel := c.label(labelID) +func (c *amd64Compiler) assignJumpTarget(label wazeroir.Label, jmpInstruction asm.Node) { + jmpTargetLabel := c.label(label) targetInst := jmpTargetLabel.initialInstruction if targetInst == nil { // If the label isn't compiled yet, allocate the NOP node, and set as the initial instruction. @@ -704,9 +716,9 @@ func (c *amd64Compiler) assignJumpTarget(labelID wazeroir.LabelID, jmpInstructio } // compileLabel implements compiler.compileLabel for the amd64 architecture. -func (c *amd64Compiler) compileLabel(o wazeroir.OperationLabel) (skipLabel bool) { - labelID := o.Label.ID() - labelInfo := c.label(labelID) +func (c *amd64Compiler) compileLabel(o *wazeroir.UnionOperation) (skipLabel bool) { + label := wazeroir.Label(o.U1) + labelInfo := c.label(label) // If initialStack is not set, that means this label has never been reached. if !labelInfo.initialStack.initialized() { @@ -729,7 +741,7 @@ func (c *amd64Compiler) compileLabel(o wazeroir.OperationLabel) (skipLabel bool) } // compileCall implements compiler.compileCall for the amd64 architecture. 
-func (c *amd64Compiler) compileCall(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileCall(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -759,7 +771,7 @@ func (c *amd64Compiler) compileCall(o wazeroir.UnionOperation) error { } // compileCallIndirect implements compiler.compileCallIndirect for the amd64 architecture. -func (c *amd64Compiler) compileCallIndirect(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) error { offset := c.locationStack.pop() if err := c.compileEnsureOnRegister(offset); err != nil { return nil @@ -845,8 +857,9 @@ func (c *amd64Compiler) compileCallIndirect(o wazeroir.UnionOperation) error { } // compileDrop implements compiler.compileDrop for the amd64 architecture. -func (c *amd64Compiler) compileDrop(o wazeroir.OperationDrop) error { - return compileDropRange(c, o.Depth) +func (c *amd64Compiler) compileDrop(o *wazeroir.UnionOperation) error { + depth := o.Rs[0] + return compileDropRange(c, depth) } // compileSelectV128Impl implements compileSelect for vector values. @@ -886,7 +899,7 @@ func (c *amd64Compiler) compileSelectV128Impl(selectorReg asm.Register) error { // // The emitted native code depends on whether the values are on // the physical registers or memory stack, or maybe conditional register. -func (c *amd64Compiler) compileSelect(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileSelect(o *wazeroir.UnionOperation) error { cv := c.locationStack.pop() if err := c.compileEnsureOnRegister(cv); err != nil { return err @@ -942,7 +955,7 @@ func (c *amd64Compiler) compileSelect(o wazeroir.UnionOperation) error { } // compilePick implements compiler.compilePick for the amd64 architecture. 
-func (c *amd64Compiler) compilePick(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compilePick(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -993,7 +1006,7 @@ func (c *amd64Compiler) compilePick(o wazeroir.UnionOperation) error { } // compileAdd implements compiler.compileAdd for the amd64 architecture. -func (c *amd64Compiler) compileAdd(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileAdd(o *wazeroir.UnionOperation) error { // TODO: if the previous instruction is const, then // this can be optimized. Same goes for other arithmetic instructions. @@ -1031,7 +1044,7 @@ func (c *amd64Compiler) compileAdd(o wazeroir.UnionOperation) error { } // compileSub implements compiler.compileSub for the amd64 architecture. -func (c *amd64Compiler) compileSub(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileSub(o *wazeroir.UnionOperation) error { // TODO: if the previous instruction is const, then // this can be optimized. Same goes for other arithmetic instructions. @@ -1068,7 +1081,7 @@ func (c *amd64Compiler) compileSub(o wazeroir.UnionOperation) error { } // compileMul implements compiler.compileMul for the amd64 architecture. -func (c *amd64Compiler) compileMul(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileMul(o *wazeroir.UnionOperation) (err error) { unsignedType := wazeroir.UnsignedType(o.B1) switch unsignedType { case wazeroir.UnsignedTypeI32: @@ -1189,7 +1202,7 @@ func (c *amd64Compiler) compileMulForFloats(instruction asm.Instruction) error { } // compileClz implements compiler.compileClz for the amd64 architecture. 
-func (c *amd64Compiler) compileClz(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileClz(o *wazeroir.UnionOperation) error { target := c.locationStack.pop() if err := c.compileEnsureOnRegister(target); err != nil { return err @@ -1253,7 +1266,7 @@ func (c *amd64Compiler) compileClz(o wazeroir.UnionOperation) error { } // compileCtz implements compiler.compileCtz for the amd64 architecture. -func (c *amd64Compiler) compileCtz(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileCtz(o *wazeroir.UnionOperation) error { target := c.locationStack.pop() if err := c.compileEnsureOnRegister(target); err != nil { return err @@ -1306,7 +1319,7 @@ func (c *amd64Compiler) compileCtz(o wazeroir.UnionOperation) error { } // compilePopcnt implements compiler.compilePopcnt for the amd64 architecture. -func (c *amd64Compiler) compilePopcnt(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compilePopcnt(o *wazeroir.UnionOperation) error { target := c.locationStack.pop() if err := c.compileEnsureOnRegister(target); err != nil { return err @@ -1326,7 +1339,7 @@ func (c *amd64Compiler) compilePopcnt(o wazeroir.UnionOperation) error { } // compileDiv implements compiler.compileDiv for the amd64 architecture. -func (c *amd64Compiler) compileDiv(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileDiv(o *wazeroir.UnionOperation) (err error) { signedType := wazeroir.SignedType(o.B1) switch signedType { case wazeroir.SignedTypeUint32: @@ -1364,7 +1377,7 @@ func (c *amd64Compiler) compileDivForInts(is32Bit bool, signed bool) error { } // compileRem implements compiler.compileRem for the amd64 architecture. 
-func (c *amd64Compiler) compileRem(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileRem(o *wazeroir.UnionOperation) (err error) { var vt runtimeValueType signedInt := wazeroir.SignedInt(o.B1) switch signedInt { @@ -1593,7 +1606,7 @@ func (c *amd64Compiler) compileDivForFloats(is32Bit bool) error { } // compileAnd implements compiler.compileAnd for the amd64 architecture. -func (c *amd64Compiler) compileAnd(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileAnd(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1605,7 +1618,7 @@ func (c *amd64Compiler) compileAnd(o wazeroir.UnionOperation) (err error) { } // compileOr implements compiler.compileOr for the amd64 architecture. -func (c *amd64Compiler) compileOr(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileOr(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1617,7 +1630,7 @@ func (c *amd64Compiler) compileOr(o wazeroir.UnionOperation) (err error) { } // compileXor implements compiler.compileXor for the amd64 architecture. -func (c *amd64Compiler) compileXor(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileXor(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1656,7 +1669,7 @@ func (c *amd64Compiler) compileSimpleBinaryOp(instruction asm.Instruction) error } // compileShl implements compiler.compileShl for the amd64 architecture. 
-func (c *amd64Compiler) compileShl(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileShl(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1668,7 +1681,7 @@ func (c *amd64Compiler) compileShl(o wazeroir.UnionOperation) (err error) { } // compileShr implements compiler.compileShr for the amd64 architecture. -func (c *amd64Compiler) compileShr(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileShr(o *wazeroir.UnionOperation) (err error) { signedInt := wazeroir.SignedInt(o.B1) switch signedInt { case wazeroir.SignedInt32: @@ -1684,7 +1697,7 @@ func (c *amd64Compiler) compileShr(o wazeroir.UnionOperation) (err error) { } // compileRotl implements compiler.compileRotl for the amd64 architecture. -func (c *amd64Compiler) compileRotl(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileRotl(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1696,7 +1709,7 @@ func (c *amd64Compiler) compileRotl(o wazeroir.UnionOperation) (err error) { } // compileRotr implements compiler.compileRotr for the amd64 architecture. -func (c *amd64Compiler) compileRotr(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileRotr(o *wazeroir.UnionOperation) (err error) { unsignedInt := wazeroir.UnsignedInt(o.B1) switch unsignedInt { case wazeroir.UnsignedInt32: @@ -1762,7 +1775,7 @@ func (c *amd64Compiler) compileShiftOp(instruction asm.Instruction, is32Bit bool // See the following discussions for how we could take the abs of floats on x86 assembly. 
// https://stackoverflow.com/questions/32408665/fastest-way-to-compute-absolute-value-using-sse/32422471#32422471 // https://stackoverflow.com/questions/44630015/how-would-fabsdouble-be-implemented-on-x86-is-it-an-expensive-operation -func (c *amd64Compiler) compileAbs(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileAbs(o *wazeroir.UnionOperation) (err error) { target := c.locationStack.peek() // Note this is peek! if err = c.compileEnsureOnRegister(target); err != nil { return err @@ -1780,7 +1793,7 @@ func (c *amd64Compiler) compileAbs(o wazeroir.UnionOperation) (err error) { } // compileNeg implements compiler.compileNeg for the amd64 architecture. -func (c *amd64Compiler) compileNeg(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileNeg(o *wazeroir.UnionOperation) (err error) { target := c.locationStack.peek() // Note this is peek! if err := c.compileEnsureOnRegister(target); err != nil { return err @@ -1811,28 +1824,28 @@ func (c *amd64Compiler) compileNeg(o wazeroir.UnionOperation) (err error) { } // compileCeil implements compiler.compileCeil for the amd64 architecture. -func (c *amd64Compiler) compileCeil(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileCeil(o *wazeroir.UnionOperation) (err error) { // Internally, ceil can be performed via ROUND instruction with 0x02 mode. // See https://android.googlesource.com/platform/bionic/+/882b8af/libm/x86_64/ceilf.S for example. return c.compileRoundInstruction(wazeroir.Float(o.B1) == wazeroir.Float32, 0x02) } // compileFloor implements compiler.compileFloor for the amd64 architecture. -func (c *amd64Compiler) compileFloor(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileFloor(o *wazeroir.UnionOperation) (err error) { // Internally, floor can be performed via ROUND instruction with 0x01 mode. // See https://android.googlesource.com/platform/bionic/+/882b8af/libm/x86_64/floorf.S for example. 
return c.compileRoundInstruction(wazeroir.Float(o.B1) == wazeroir.Float32, 0x01) } // compileTrunc implements compiler.compileTrunc for the amd64 architecture. -func (c *amd64Compiler) compileTrunc(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileTrunc(o *wazeroir.UnionOperation) error { // Internally, trunc can be performed via ROUND instruction with 0x03 mode. // See https://android.googlesource.com/platform/bionic/+/882b8af/libm/x86_64/truncf.S for example. return c.compileRoundInstruction(wazeroir.Float(o.B1) == wazeroir.Float32, 0x03) } // compileNearest implements compiler.compileNearest for the amd64 architecture. -func (c *amd64Compiler) compileNearest(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileNearest(o *wazeroir.UnionOperation) error { // Nearest can be performed via ROUND instruction with 0x00 mode. return c.compileRoundInstruction(wazeroir.Float(o.B1) == wazeroir.Float32, 0x00) } @@ -1852,7 +1865,7 @@ func (c *amd64Compiler) compileRoundInstruction(is32Bit bool, mode int64) error } // compileMin implements compiler.compileMin for the amd64 architecture. -func (c *amd64Compiler) compileMin(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileMin(o *wazeroir.UnionOperation) error { is32Bit := wazeroir.Float(o.B1) == wazeroir.Float32 if is32Bit { return c.compileMinOrMax(is32Bit, true, amd64.MINSS) @@ -1862,7 +1875,7 @@ func (c *amd64Compiler) compileMin(o wazeroir.UnionOperation) error { } // compileMax implements compiler.compileMax for the amd64 architecture. 
-func (c *amd64Compiler) compileMax(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileMax(o *wazeroir.UnionOperation) error { is32Bit := wazeroir.Float(o.B1) == wazeroir.Float32 if is32Bit { return c.compileMinOrMax(is32Bit, false, amd64.MAXSS) @@ -1966,7 +1979,7 @@ func (c *amd64Compiler) compileMinOrMax(is32Bit, isMin bool, minOrMaxInstruction } // compileCopysign implements compiler.compileCopysign for the amd64 architecture. -func (c *amd64Compiler) compileCopysign(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileCopysign(o *wazeroir.UnionOperation) error { is32Bit := wazeroir.Float(o.B1) == wazeroir.Float32 x2 := c.locationStack.pop() @@ -2031,7 +2044,7 @@ func (c *amd64Compiler) compileCopysign(o wazeroir.UnionOperation) error { } // compileSqrt implements compiler.compileSqrt for the amd64 architecture. -func (c *amd64Compiler) compileSqrt(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileSqrt(o *wazeroir.UnionOperation) error { target := c.locationStack.peek() // Note this is peek! 
if err := c.compileEnsureOnRegister(target); err != nil { return err @@ -2065,23 +2078,26 @@ func (c *amd64Compiler) compileI32WrapFromI64() error { // https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-1-manual.html // // [2] https://xem.github.io/minix86/manual/intel-x86-and-64-manual-vol1/o_7281d5ea06a5b67a-268.html -func (c *amd64Compiler) compileITruncFromF(o wazeroir.OperationITruncFromF) (err error) { - if o.InputType == wazeroir.Float32 && o.OutputType == wazeroir.SignedInt32 { - err = c.emitSignedI32TruncFromFloat(true, o.NonTrapping) - } else if o.InputType == wazeroir.Float32 && o.OutputType == wazeroir.SignedInt64 { - err = c.emitSignedI64TruncFromFloat(true, o.NonTrapping) - } else if o.InputType == wazeroir.Float64 && o.OutputType == wazeroir.SignedInt32 { - err = c.emitSignedI32TruncFromFloat(false, o.NonTrapping) - } else if o.InputType == wazeroir.Float64 && o.OutputType == wazeroir.SignedInt64 { - err = c.emitSignedI64TruncFromFloat(false, o.NonTrapping) - } else if o.InputType == wazeroir.Float32 && o.OutputType == wazeroir.SignedUint32 { - err = c.emitUnsignedI32TruncFromFloat(true, o.NonTrapping) - } else if o.InputType == wazeroir.Float32 && o.OutputType == wazeroir.SignedUint64 { - err = c.emitUnsignedI64TruncFromFloat(true, o.NonTrapping) - } else if o.InputType == wazeroir.Float64 && o.OutputType == wazeroir.SignedUint32 { - err = c.emitUnsignedI32TruncFromFloat(false, o.NonTrapping) - } else if o.InputType == wazeroir.Float64 && o.OutputType == wazeroir.SignedUint64 { - err = c.emitUnsignedI64TruncFromFloat(false, o.NonTrapping) +func (c *amd64Compiler) compileITruncFromF(o *wazeroir.UnionOperation) (err error) { + inputType := wazeroir.Float(o.B1) + outputType := wazeroir.SignedInt(o.B2) + nonTrapping := o.B3 + if inputType == wazeroir.Float32 && outputType == wazeroir.SignedInt32 { + err = c.emitSignedI32TruncFromFloat(true, nonTrapping) + } else if inputType == 
wazeroir.Float32 && outputType == wazeroir.SignedInt64 { + err = c.emitSignedI64TruncFromFloat(true, nonTrapping) + } else if inputType == wazeroir.Float64 && outputType == wazeroir.SignedInt32 { + err = c.emitSignedI32TruncFromFloat(false, nonTrapping) + } else if inputType == wazeroir.Float64 && outputType == wazeroir.SignedInt64 { + err = c.emitSignedI64TruncFromFloat(false, nonTrapping) + } else if inputType == wazeroir.Float32 && outputType == wazeroir.SignedUint32 { + err = c.emitUnsignedI32TruncFromFloat(true, nonTrapping) + } else if inputType == wazeroir.Float32 && outputType == wazeroir.SignedUint64 { + err = c.emitUnsignedI64TruncFromFloat(true, nonTrapping) + } else if inputType == wazeroir.Float64 && outputType == wazeroir.SignedUint32 { + err = c.emitUnsignedI32TruncFromFloat(false, nonTrapping) + } else if inputType == wazeroir.Float64 && outputType == wazeroir.SignedUint64 { + err = c.emitUnsignedI64TruncFromFloat(false, nonTrapping) } return } @@ -2636,16 +2652,18 @@ func (c *amd64Compiler) emitSignedI64TruncFromFloat(isFloat32Bit, nonTrapping bo } // compileFConvertFromI implements compiler.compileFConvertFromI for the amd64 architecture. 
-func (c *amd64Compiler) compileFConvertFromI(o wazeroir.OperationFConvertFromI) (err error) { - if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedInt32 { +func (c *amd64Compiler) compileFConvertFromI(o *wazeroir.UnionOperation) (err error) { + inputType := wazeroir.SignedInt(o.B1) + outputType := wazeroir.Float(o.B2) + if outputType == wazeroir.Float32 && inputType == wazeroir.SignedInt32 { err = c.compileSimpleConversion(amd64.CVTSL2SS, registerTypeVector, runtimeValueTypeF32) // = CVTSI2SS for 32bit int - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedInt64 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedInt64 { err = c.compileSimpleConversion(amd64.CVTSQ2SS, registerTypeVector, runtimeValueTypeF32) // = CVTSI2SS for 64bit int - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedInt32 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedInt32 { err = c.compileSimpleConversion(amd64.CVTSL2SD, registerTypeVector, runtimeValueTypeF64) // = CVTSI2SD for 32bit int - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedInt64 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedInt64 { err = c.compileSimpleConversion(amd64.CVTSQ2SD, registerTypeVector, runtimeValueTypeF64) // = CVTSI2SD for 64bit int - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedUint32 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedUint32 { // See the following link for why we use 64bit conversion for unsigned 32bit integer sources: // https://stackoverflow.com/questions/41495498/fpu-operations-generated-by-gcc-during-casting-integer-to-float. 
// @@ -2655,12 +2673,12 @@ func (c *amd64Compiler) compileFConvertFromI(o wazeroir.OperationFConvertFromI) // >> registers available, so the unsigned 32-bit input values can be stored as signed 64-bit intermediate values, // >> which allows CVTSI2SS to be used after all. err = c.compileSimpleConversion(amd64.CVTSQ2SS, registerTypeVector, runtimeValueTypeF32) // = CVTSI2SS for 64bit int. - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedUint32 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedUint32 { // For the same reason above, we use 64bit conversion for unsigned 32bit. err = c.compileSimpleConversion(amd64.CVTSQ2SD, registerTypeVector, runtimeValueTypeF64) // = CVTSI2SD for 64bit int. - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedUint64 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedUint64 { err = c.emitUnsignedInt64ToFloatConversion(true) - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedUint64 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedUint64 { err = c.emitUnsignedInt64ToFloatConversion(false) } return @@ -2861,9 +2879,10 @@ func (c *amd64Compiler) compileF64ReinterpretFromI64() error { } // compileExtend implements compiler.compileExtend for the amd64 architecture. -func (c *amd64Compiler) compileExtend(o wazeroir.OperationExtend) error { +func (c *amd64Compiler) compileExtend(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Signed { + signed := o.B1 != 0 + if signed { inst = amd64.MOVLQSX // = MOVSXD https://www.felixcloutier.com/x86/movsx:movsxd } else { inst = amd64.MOVL @@ -2908,12 +2927,12 @@ func (c *amd64Compiler) compileExtendImpl(inst asm.Instruction, destinationType } // compileEq implements compiler.compileEq for the amd64 architecture. 
-func (c *amd64Compiler) compileEq(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileEq(o *wazeroir.UnionOperation) error { return c.compileEqOrNe(wazeroir.UnsignedType(o.B1), true) } // compileNe implements compiler.compileNe for the amd64 architecture. -func (c *amd64Compiler) compileNe(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileNe(o *wazeroir.UnionOperation) error { return c.compileEqOrNe(wazeroir.UnsignedType(o.B1), false) } @@ -3022,7 +3041,7 @@ func (c *amd64Compiler) compileEqOrNeForFloats(x1Reg, x2Reg asm.Register, cmpIns } // compileEqz implements compiler.compileEqz for the amd64 architecture. -func (c *amd64Compiler) compileEqz(o wazeroir.UnionOperation) (err error) { +func (c *amd64Compiler) compileEqz(o *wazeroir.UnionOperation) (err error) { v := c.locationStack.pop() if err = c.compileEnsureOnRegister(v); err != nil { return err @@ -3049,7 +3068,7 @@ func (c *amd64Compiler) compileEqz(o wazeroir.UnionOperation) (err error) { } // compileLt implements compiler.compileLt for the amd64 architecture. -func (c *amd64Compiler) compileLt(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLt(o *wazeroir.UnionOperation) error { x2 := c.locationStack.pop() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -3097,7 +3116,7 @@ func (c *amd64Compiler) compileLt(o wazeroir.UnionOperation) error { } // compileGt implements compiler.compileGt for the amd64 architecture. -func (c *amd64Compiler) compileGt(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileGt(o *wazeroir.UnionOperation) error { x2 := c.locationStack.pop() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -3143,7 +3162,7 @@ func (c *amd64Compiler) compileGt(o wazeroir.UnionOperation) error { } // compileLe implements compiler.compileLe for the amd64 architecture. 
-func (c *amd64Compiler) compileLe(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLe(o *wazeroir.UnionOperation) error { x2 := c.locationStack.pop() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -3191,7 +3210,7 @@ func (c *amd64Compiler) compileLe(o wazeroir.UnionOperation) error { } // compileGe implements compiler.compileGe for the amd64 architecture. -func (c *amd64Compiler) compileGe(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileGe(o *wazeroir.UnionOperation) error { x2 := c.locationStack.pop() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -3237,7 +3256,7 @@ func (c *amd64Compiler) compileGe(o wazeroir.UnionOperation) error { } // compileLoad implements compiler.compileLoad for the amd64 architecture. -func (c *amd64Compiler) compileLoad(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLoad(o *wazeroir.UnionOperation) error { var ( isIntType bool movInst asm.Instruction @@ -3302,7 +3321,7 @@ func (c *amd64Compiler) compileLoad(o wazeroir.UnionOperation) error { } // compileLoad8 implements compiler.compileLoad8 for the amd64 architecture. -func (c *amd64Compiler) compileLoad8(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLoad8(o *wazeroir.UnionOperation) error { const targetSizeInBytes = 1 offset := uint32(o.U2) reg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) @@ -3340,7 +3359,7 @@ func (c *amd64Compiler) compileLoad8(o wazeroir.UnionOperation) error { } // compileLoad16 implements compiler.compileLoad16 for the amd64 architecture. 
-func (c *amd64Compiler) compileLoad16(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLoad16(o *wazeroir.UnionOperation) error { const targetSizeInBytes = 16 / 8 offset := uint32(o.U2) reg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) @@ -3378,7 +3397,7 @@ func (c *amd64Compiler) compileLoad16(o wazeroir.UnionOperation) error { } // compileLoad32 implements compiler.compileLoad32 for the amd64 architecture. -func (c *amd64Compiler) compileLoad32(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileLoad32(o *wazeroir.UnionOperation) error { const targetSizeInBytes = 32 / 8 offset := uint32(o.U2) reg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) @@ -3453,7 +3472,7 @@ func (c *amd64Compiler) compileMemoryAccessCeilSetup(offsetArg uint32, targetSiz } // compileStore implements compiler.compileStore for the amd64 architecture. -func (c *amd64Compiler) compileStore(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileStore(o *wazeroir.UnionOperation) error { var movInst asm.Instruction var targetSizeInByte int64 unsignedType := wazeroir.UnsignedType(o.B1) @@ -3470,17 +3489,17 @@ func (c *amd64Compiler) compileStore(o wazeroir.UnionOperation) error { } // compileStore8 implements compiler.compileStore8 for the amd64 architecture. -func (c *amd64Compiler) compileStore8(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileStore8(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), amd64.MOVB, 1) } // compileStore32 implements compiler.compileStore32 for the amd64 architecture. -func (c *amd64Compiler) compileStore16(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileStore16(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), amd64.MOVW, 16/8) } // compileStore32 implements compiler.compileStore32 for the amd64 architecture. 
-func (c *amd64Compiler) compileStore32(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileStore32(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), amd64.MOVL, 32/8) } @@ -3544,8 +3563,9 @@ func (c *amd64Compiler) compileMemorySize() error { } // compileMemoryInit implements compiler.compileMemoryInit for the amd64 architecture. -func (c *amd64Compiler) compileMemoryInit(o wazeroir.OperationMemoryInit) error { - return c.compileInitImpl(false, o.DataIndex, 0) +func (c *amd64Compiler) compileMemoryInit(o *wazeroir.UnionOperation) error { + dataIndex := uint32(o.U1) + return c.compileInitImpl(false, dataIndex, 0) } // compileInitImpl implements compileTableInit and compileMemoryInit. @@ -3681,7 +3701,7 @@ func (c *amd64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) } // compileDataDrop implements compiler.compileDataDrop for the amd64 architecture. -func (c *amd64Compiler) compileDataDrop(o wazeroir.OperationDataDrop) error { +func (c *amd64Compiler) compileDataDrop(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -3691,7 +3711,8 @@ func (c *amd64Compiler) compileDataDrop(o wazeroir.OperationDataDrop) error { return err } - c.compileLoadDataInstanceAddress(o.DataIndex, tmp) + dataIndex := uint32(o.U1) + c.compileLoadDataInstanceAddress(dataIndex, tmp) // Clears the content of DataInstance[o.DataIndex] (== []byte type). c.assembler.CompileConstToMemory(amd64.MOVQ, 0, tmp, 0) @@ -4022,12 +4043,14 @@ func (c *amd64Compiler) compileMemoryFill() error { } // compileTableInit implements compiler.compileTableInit for the amd64 architecture. 
-func (c *amd64Compiler) compileTableInit(o wazeroir.OperationTableInit) error { - return c.compileInitImpl(true, o.ElemIndex, o.TableIndex) +func (c *amd64Compiler) compileTableInit(o *wazeroir.UnionOperation) error { + elemIndex := uint32(o.U1) + tableIndex := uint32(o.U2) + return c.compileInitImpl(true, elemIndex, tableIndex) } // compileTableCopyLoopImpl is used for directly copying after bounds/direction check. -func (c *amd64Compiler) compileTableCopyLoopImpl(o wazeroir.OperationTableCopy, destinationOffset, sourceOffset, copySize *runtimeValueLocation, tmp asm.Register, backwards bool) { +func (c *amd64Compiler) compileTableCopyLoopImpl(srcTableIndex, dstTableIndex uint32, destinationOffset, sourceOffset, copySize *runtimeValueLocation, tmp asm.Register, backwards bool) { // Point on first byte to be copied. if !backwards { c.assembler.CompileRegisterToRegister(amd64.SUBQ, copySize.register, sourceOffset.register) @@ -4039,11 +4062,11 @@ func (c *amd64Compiler) compileTableCopyLoopImpl(o wazeroir.OperationTableCopy, c.assembler.CompileConstToRegister(amd64.SHLQ, pointerSizeLog2, destinationOffset.register) // destinationOffset += table buffer's absolute address. c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) - c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(o.DstTableIndex*8), tmp) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(dstTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.ADDQ, tmp, tableInstanceTableOffset, destinationOffset.register) // sourceOffset += table buffer's absolute address. 
c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) - c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(o.SrcTableIndex*8), tmp) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(srcTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.ADDQ, tmp, tableInstanceTableOffset, sourceOffset.register) c.compileCopyLoopImpl(destinationOffset, sourceOffset, copySize, backwards, 8) @@ -4053,7 +4076,7 @@ func (c *amd64Compiler) compileTableCopyLoopImpl(o wazeroir.OperationTableCopy, // // It uses efficient `REP MOVSB` instructions for optimized copying. It uses backward copying for // overlapped segments. -func (c *amd64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { +func (c *amd64Compiler) compileTableCopy(o *wazeroir.UnionOperation) error { copySize := c.locationStack.pop() if err := c.compileEnsureOnRegister(copySize); err != nil { return err @@ -4079,9 +4102,12 @@ func (c *amd64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { // destinationOffset += size. c.assembler.CompileRegisterToRegister(amd64.ADDQ, copySize.register, destinationOffset.register) + srcTableIndex := uint32(o.U1) + dstTableIndex := uint32(o.U2) + // Check source bounds and if exceeds the length, exit with out of bounds error. 
c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) - c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(o.SrcTableIndex*8), tmp) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(srcTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, sourceOffset.register) sourceBoundOKJump := c.assembler.CompileJump(amd64.JCC) c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) @@ -4089,7 +4115,7 @@ func (c *amd64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { // Check destination bounds and if exceeds the length, exit with out of bounds error. c.assembler.CompileMemoryToRegister(amd64.MOVQ, amd64ReservedRegisterForCallEngine, callEngineModuleContextTablesElement0AddressOffset, tmp) - c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(o.DstTableIndex*8), tmp) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(dstTableIndex*8), tmp) c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, destinationOffset.register) destinationBoundOKJump := c.assembler.CompileJump(amd64.JCC) c.compileExitFromNativeCode(nativeCallStatusCodeInvalidTableAccess) @@ -4110,13 +4136,13 @@ func (c *amd64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { sourceBoundLowerThanDestJump := c.assembler.CompileJump(amd64.JLS) // Copy backwards. - c.compileTableCopyLoopImpl(o, destinationOffset, sourceOffset, copySize, tmp, true) + c.compileTableCopyLoopImpl(srcTableIndex, dstTableIndex, destinationOffset, sourceOffset, copySize, tmp, true) endJump := c.assembler.CompileJump(amd64.JMP) // Copy forwards. 
c.assembler.SetJumpTargetOnNext(destLowerThanSourceJump) c.assembler.SetJumpTargetOnNext(sourceBoundLowerThanDestJump) - c.compileTableCopyLoopImpl(o, destinationOffset, sourceOffset, copySize, tmp, false) + c.compileTableCopyLoopImpl(srcTableIndex, dstTableIndex, destinationOffset, sourceOffset, copySize, tmp, false) c.locationStack.markRegisterUnused(copySize.register, sourceOffset.register, destinationOffset.register, tmp) @@ -4126,7 +4152,7 @@ func (c *amd64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { } // compileElemDrop implements compiler.compileElemDrop for the amd64 architecture. -func (c *amd64Compiler) compileElemDrop(o wazeroir.OperationElemDrop) error { +func (c *amd64Compiler) compileElemDrop(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4136,7 +4162,8 @@ func (c *amd64Compiler) compileElemDrop(o wazeroir.OperationElemDrop) error { return err } - c.compileLoadElemInstanceAddress(o.ElemIndex, tmp) + elemIndex := uint32(o.U1) + c.compileLoadElemInstanceAddress(elemIndex, tmp) // Clears the content of ElementInstances[o.ElemIndex].References (== []uintptr{} type). c.assembler.CompileConstToMemory(amd64.MOVQ, 0, tmp, 0) @@ -4159,7 +4186,7 @@ func (c *amd64Compiler) compileLoadElemInstanceAddress(elemIndex uint32, dst asm } // compileTableGet implements compiler.compileTableGet for the amd64 architecture. -func (c *amd64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { +func (c *amd64Compiler) compileTableGet(o *wazeroir.UnionOperation) error { ref, err := c.allocateRegister(registerTypeGeneralPurpose) if err != nil { return err @@ -4180,7 +4207,8 @@ func (c *amd64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { // ref = [ref + TableIndex*8] // = [&tables[0] + TableIndex*sizeOf(*tableInstance)] // = [&tables[TableIndex]] = tables[TableIndex]. 
- c.assembler.CompileMemoryToRegister(amd64.MOVQ, ref, int64(o.TableIndex)*8, ref) + tableIndex := int64(o.U1) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, ref, tableIndex*8, ref) // Out of bounds check. c.assembler.CompileMemoryToRegister(amd64.CMPQ, ref, tableInstanceTableLenOffset, offset.register) @@ -4205,7 +4233,7 @@ func (c *amd64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { } // compileTableSet implements compiler.compileTableSet for the amd64 architecture. -func (c *amd64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { +func (c *amd64Compiler) compileTableSet(o *wazeroir.UnionOperation) error { ref := c.locationStack.pop() if err := c.compileEnsureOnRegister(ref); err != nil { return err @@ -4229,7 +4257,8 @@ func (c *amd64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { // ref = [ref + TableIndex*8] // = [&tables[0] + TableIndex*sizeOf(*tableInstance)] // = [&tables[TableIndex]] = tables[TableIndex]. - c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, int64(o.TableIndex)*8, tmp) + tableIndex := int64(o.U1) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, tmp, tableIndex*8, tmp) // Out of bounds check. c.assembler.CompileMemoryToRegister(amd64.CMPQ, tmp, tableInstanceTableLenOffset, offset.register) @@ -4253,13 +4282,14 @@ func (c *amd64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { } // compileTableGrow implements compiler.compileTableGrow for the amd64 architecture. -func (c *amd64Compiler) compileTableGrow(o wazeroir.OperationTableGrow) error { +func (c *amd64Compiler) compileTableGrow(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } // Pushes the table index. 
- if err := c.compileConstI32(wazeroir.NewOperationConstI32(o.TableIndex)); err != nil { + tableIndex := uint32(o.U1) + if err := c.compileConstI32Impl(tableIndex); err != nil { return err } @@ -4285,7 +4315,7 @@ func (c *amd64Compiler) compileTableGrow(o wazeroir.OperationTableGrow) error { } // compileTableSize implements compiler.compileTableSize for the amd64 architecture. -func (c *amd64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { +func (c *amd64Compiler) compileTableSize(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4303,7 +4333,8 @@ func (c *amd64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { // result = [result + TableIndex*8] // = [&tables[0] + TableIndex*sizeOf(*tableInstance)] // = [&tables[TableIndex]] = tables[TableIndex]. - c.assembler.CompileMemoryToRegister(amd64.MOVQ, result, int64(o.TableIndex)*8, result) + tableIndex := int64(o.U1) + c.assembler.CompileMemoryToRegister(amd64.MOVQ, result, tableIndex*8, result) // result = [result + tableInstanceTableLenOffset] // = [tables[TableIndex] + tableInstanceTableLenOffset] @@ -4315,12 +4346,13 @@ func (c *amd64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { } // compileTableFill implements compiler.compileTableFill for the amd64 architecture. -func (c *amd64Compiler) compileTableFill(o wazeroir.OperationTableFill) error { - return c.compileFillImpl(true, o.TableIndex) +func (c *amd64Compiler) compileTableFill(o *wazeroir.UnionOperation) error { + tableIndex := uint32(o.U1) + return c.compileFillImpl(true, tableIndex) } // compileRefFunc implements compiler.compileRefFunc for the amd64 architecture. 
-func (c *amd64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { +func (c *amd64Compiler) compileRefFunc(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4330,7 +4362,8 @@ func (c *amd64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { return err } - c.assembler.CompileConstToRegister(amd64.MOVQ, int64(o.FunctionIndex)*functionSize, ref) + functionIndex := int64(o.U1) + c.assembler.CompileConstToRegister(amd64.MOVQ, functionIndex*functionSize, ref) // ref = [amd64ReservedRegisterForCallEngine + callEngineModuleContextFunctionsElement0AddressOffset + int64(o.FunctionIndex)*functionSize] // = &moduleEngine.functions[index] @@ -4344,7 +4377,11 @@ func (c *amd64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { } // compileConstI32 implements compiler.compileConstI32 for the amd64 architecture. -func (c *amd64Compiler) compileConstI32(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileConstI32(o *wazeroir.UnionOperation) error { + return c.compileConstI32Impl(uint32(o.U1)) +} + +func (c *amd64Compiler) compileConstI32Impl(v uint32) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4354,12 +4391,12 @@ func (c *amd64Compiler) compileConstI32(o wazeroir.UnionOperation) error { return err } c.pushRuntimeValueLocationOnRegister(reg, runtimeValueTypeI32) - c.assembler.CompileConstToRegister(amd64.MOVL, int64(o.U1), reg) + c.assembler.CompileConstToRegister(amd64.MOVL, int64(v), reg) return nil } // compileConstI64 implements compiler.compileConstI64 for the amd64 architecture. 
-func (c *amd64Compiler) compileConstI64(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileConstI64(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4375,7 +4412,7 @@ func (c *amd64Compiler) compileConstI64(o wazeroir.UnionOperation) error { } // compileConstF32 implements compiler.compileConstF32 for the amd64 architecture. -func (c *amd64Compiler) compileConstF32(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileConstF32(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -4399,7 +4436,7 @@ func (c *amd64Compiler) compileConstF32(o wazeroir.UnionOperation) error { } // compileConstF64 implements compiler.compileConstF64 for the amd64 architecture. -func (c *amd64Compiler) compileConstF64(o wazeroir.UnionOperation) error { +func (c *amd64Compiler) compileConstF64(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } diff --git a/internal/engine/compiler/impl_amd64_test.go b/internal/engine/compiler/impl_amd64_test.go index fe6e5e1b..124a46a3 100644 --- a/internal/engine/compiler/impl_amd64_test.go +++ b/internal/engine/compiler/impl_amd64_test.go @@ -22,7 +22,7 @@ func TestAmd64Compiler_indirectCallWithTargetOnCallingConvReg(t *testing.T) { env.addTable(&wasm.TableInstance{References: table}) // Ensure that the module instance has the type information for targetOperation.TypeIndex, // and the typeID matches the table[targetOffset]'s type ID. 
- operation := wazeroir.NewOperationCallIndirect(0, 0) + operation := operationPtr(wazeroir.NewOperationCallIndirect(0, 0)) env.module().TypeIDs = []wasm.FunctionTypeID{0} env.module().Engine = &moduleEngine{functions: []function{}} @@ -165,11 +165,11 @@ func TestAmd64Compiler_compile_Mul_Div_Rem(t *testing.T) { switch kind { case wazeroir.OperationKindDiv: - err = compiler.compileDiv(wazeroir.NewOperationDiv(wazeroir.SignedTypeUint32)) + err = compiler.compileDiv(operationPtr(wazeroir.NewOperationDiv(wazeroir.SignedTypeUint32))) case wazeroir.OperationKindMul: - err = compiler.compileMul(wazeroir.NewOperationMul(wazeroir.UnsignedTypeI32)) + err = compiler.compileMul(operationPtr(wazeroir.NewOperationMul(wazeroir.UnsignedTypeI32))) case wazeroir.OperationKindRem: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedUint32)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedUint32))) } require.NoError(t, err) @@ -182,7 +182,7 @@ func TestAmd64Compiler_compile_Mul_Div_Rem(t *testing.T) { // We add the value previously on the DX with the multiplication result // in order to ensure that not saving existing DX value would cause // the failure in a subsequent instruction. 
- err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI32))) require.NoError(t, err) require.NoError(t, compiler.compileReturnFunction()) @@ -291,11 +291,11 @@ func TestAmd64Compiler_compile_Mul_Div_Rem(t *testing.T) { switch kind { case wazeroir.OperationKindDiv: - err = compiler.compileDiv(wazeroir.NewOperationDiv(wazeroir.SignedTypeInt64)) + err = compiler.compileDiv(operationPtr(wazeroir.NewOperationDiv(wazeroir.SignedTypeInt64))) case wazeroir.OperationKindMul: - err = compiler.compileMul(wazeroir.NewOperationMul(wazeroir.UnsignedTypeI64)) + err = compiler.compileMul(operationPtr(wazeroir.NewOperationMul(wazeroir.UnsignedTypeI64))) case wazeroir.OperationKindRem: - err = compiler.compileRem(wazeroir.NewOperationRem(wazeroir.SignedUint64)) + err = compiler.compileRem(operationPtr(wazeroir.NewOperationRem(wazeroir.SignedUint64))) } require.NoError(t, err) @@ -308,7 +308,7 @@ func TestAmd64Compiler_compile_Mul_Div_Rem(t *testing.T) { // We add the value previously on the DX with the multiplication result // in order to ensure that not saving existing DX value would cause // the failure in a subsequent instruction. - err = compiler.compileAdd(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI64)) + err = compiler.compileAdd(operationPtr(wazeroir.NewOperationAdd(wazeroir.UnsignedTypeI64))) require.NoError(t, err) require.NoError(t, compiler.compileReturnFunction()) @@ -378,7 +378,7 @@ func TestAmd64Compiler_readInstructionAddress(t *testing.T) { // right after RET. Therefore, the jmp instruction above // must target here. 
const expectedReturnValue uint32 = 10000 - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(expectedReturnValue)) + err = compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(expectedReturnValue))) require.NoError(t, err) err = compiler.compileReturnFunction() @@ -498,10 +498,10 @@ func TestAmd64Compiler_ensureClz_ABM(t *testing.T) { compiler := env.requireNewCompiler(t, newCompiler, nil) - err := compiler.compileConstI32(wazeroir.NewOperationConstI32(10)) + err := compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(10))) require.NoError(t, err) - err = compiler.compileClz(wazeroir.NewOperationClz(wazeroir.UnsignedInt64)) + err = compiler.compileClz(operationPtr(wazeroir.NewOperationClz(wazeroir.UnsignedInt64))) require.NoError(t, err) compiler.compileNOP() // pad for jump target (when no ABM) @@ -553,10 +553,10 @@ func TestAmd64Compiler_ensureCtz_ABM(t *testing.T) { compiler := env.requireNewCompiler(t, newCompiler, nil) - err := compiler.compileConstI32(wazeroir.NewOperationConstI32(10)) + err := compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(10))) require.NoError(t, err) - err = compiler.compileCtz(wazeroir.NewOperationCtz(wazeroir.UnsignedInt64)) + err = compiler.compileCtz(operationPtr(wazeroir.NewOperationCtz(wazeroir.UnsignedInt64))) require.NoError(t, err) compiler.compileNOP() // pad for jump target (when no ABM) diff --git a/internal/engine/compiler/impl_arm64.go b/internal/engine/compiler/impl_arm64.go index ce5f31cf..df2ea508 100644 --- a/internal/engine/compiler/impl_arm64.go +++ b/internal/engine/compiler/impl_arm64.go @@ -141,10 +141,10 @@ type arm64LabelInfo struct { initialStack runtimeValueLocationStack } -func (c *arm64Compiler) label(labelID wazeroir.LabelID) *arm64LabelInfo { - kind := labelID.Kind() +func (c *arm64Compiler) label(label wazeroir.Label) *arm64LabelInfo { + kind := label.Kind() frames := c.labels[kind] - frameID := labelID.FrameID() + frameID := label.FrameID() // If 
the frameID is not allocated yet, expand the slice by twice of the diff, // so that we could reduce the allocation in the subsequent compilation. if diff := frameID - len(frames) + 1; diff > 0 { @@ -466,8 +466,8 @@ func (c *arm64Compiler) compileBuiltinFunctionCheckExitCode() error { } // compileLabel implements compiler.compileLabel for the arm64 architecture. -func (c *arm64Compiler) compileLabel(o wazeroir.OperationLabel) (skipThisLabel bool) { - labelKey := o.Label.ID() +func (c *arm64Compiler) compileLabel(o *wazeroir.UnionOperation) (skipThisLabel bool) { + labelKey := wazeroir.Label(o.U1) labelInfo := c.label(labelKey) // If initialStack is not set, that means this label has never been reached. @@ -496,7 +496,7 @@ func (c *arm64Compiler) compileUnreachable() error { } // compileSet implements compiler.compileSet for the arm64 architecture. -func (c *arm64Compiler) compileSet(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileSet(o *wazeroir.UnionOperation) error { depth := int(o.U1) isTargetVector := o.B3 @@ -527,7 +527,7 @@ func (c *arm64Compiler) compileSet(o wazeroir.UnionOperation) error { } // compileGlobalGet implements compiler.compileGlobalGet for the arm64 architecture. -func (c *arm64Compiler) compileGlobalGet(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileGlobalGet(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -594,7 +594,7 @@ func (c *arm64Compiler) compileGlobalGet(o wazeroir.UnionOperation) error { } // compileGlobalSet implements compiler.compileGlobalSet for the arm64 architecture. 
-func (c *arm64Compiler) compileGlobalSet(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileGlobalSet(o *wazeroir.UnionOperation) error { index := uint32(o.U1) wasmValueType := c.ir.Globals[index].ValType @@ -679,15 +679,15 @@ func (c *arm64Compiler) compileReadGlobalAddress(globalIndex uint32) (destinatio } // compileBr implements compiler.compileBr for the arm64 architecture. -func (c *arm64Compiler) compileBr(o wazeroir.OperationBr) error { +func (c *arm64Compiler) compileBr(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } - return c.compileBranchInto(o.Target) + return c.compileBranchInto(wazeroir.Label(o.U1)) } // compileBrIf implements compiler.compileBrIf for the arm64 architecture. -func (c *arm64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { +func (c *arm64Compiler) compileBrIf(o *wazeroir.UnionOperation) error { cond := c.locationStack.pop() var conditionalBR asm.Node @@ -748,10 +748,12 @@ func (c *arm64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { // and we have to avoid affecting the code generation for Then branch afterwards. saved := c.locationStack c.setLocationStack(saved.clone()) - if err := compileDropRange(c, o.Else.ToDrop); err != nil { + elseToDrop := o.Rs[1] + elseTarget := wazeroir.Label(o.Us[1]) + if err := compileDropRange(c, elseToDrop); err != nil { return err } - if err := c.compileBranchInto(o.Else.Target); err != nil { + if err := c.compileBranchInto(elseTarget); err != nil { return err } @@ -760,18 +762,19 @@ func (c *arm64Compiler) compileBrIf(o wazeroir.OperationBrIf) error { c.setLocationStack(saved) // We branch into here from the original conditional BR (conditionalBR). 
c.assembler.SetJumpTargetOnNext(conditionalBR) - if err := compileDropRange(c, o.Then.ToDrop); err != nil { + thenToDrop := o.Rs[0] + thenTarget := wazeroir.Label(o.Us[0]) + if err := compileDropRange(c, thenToDrop); err != nil { return err } - return c.compileBranchInto(o.Then.Target) + return c.compileBranchInto(thenTarget) } func (c *arm64Compiler) compileBranchInto(target wazeroir.Label) error { if target.IsReturnTarget() { return c.compileReturnFunction() } else { - labelID := target.ID() - if c.ir.LabelCallers[labelID] > 1 { + if c.ir.LabelCallers[target] > 1 { // We can only re-use register state if when there's a single call-site. // Release existing values on registers to the stack if there's multiple ones to have // the consistent value location state at the beginning of label. @@ -782,20 +785,20 @@ func (c *arm64Compiler) compileBranchInto(target wazeroir.Label) error { // Set the initial stack of the target label, so we can start compiling the label // with the appropriate value locations. Note we clone the stack here as we maybe // manipulate the stack before compiler reaches the label. - targetLabel := c.label(labelID) + targetLabel := c.label(target) if !targetLabel.initialStack.initialized() { targetLabel.initialStack = c.locationStack.clone() } br := c.assembler.CompileJump(arm64.B) - c.assignBranchTarget(labelID, br) + c.assignBranchTarget(target, br) return nil } } // assignBranchTarget assigns the given label's initial instruction to the destination of br. -func (c *arm64Compiler) assignBranchTarget(labelID wazeroir.LabelID, br asm.Node) { - target := c.label(labelID) +func (c *arm64Compiler) assignBranchTarget(label wazeroir.Label, br asm.Node) { + target := c.label(label) targetInst := target.initialInstruction if targetInst == nil { @@ -808,17 +811,21 @@ func (c *arm64Compiler) assignBranchTarget(labelID wazeroir.LabelID, br asm.Node } // compileBrTable implements compiler.compileBrTable for the arm64 architecture. 
-func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { +func (c *arm64Compiler) compileBrTable(o *wazeroir.UnionOperation) error { // If the operation only consists of the default target, we branch into it and return early. - if len(o.Targets) == 0 { + if len(o.Us) == 1 { loc := c.locationStack.pop() if loc.onRegister() { c.markRegisterUnused(loc.register) } - if err := compileDropRange(c, o.Default.ToDrop); err != nil { + var r *wazeroir.InclusiveRange + if len(o.Rs) > 0 { + r = o.Rs[0] + } + if err := compileDropRange(c, r); err != nil { return err } - return c.compileBranchInto(o.Default.Target) + return c.compileBranchInto(wazeroir.Label(o.Us[0])) } index := c.locationStack.pop() @@ -845,7 +852,7 @@ func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { // Load the branch table's length. // "tmpReg = len(o.Targets)" - c.assembler.CompileConstToRegister(arm64.MOVW, int64(len(o.Targets)), tmpReg) + c.assembler.CompileConstToRegister(arm64.MOVW, int64(len(o.Us)-1), tmpReg) // Compare the length with offset. c.assembler.CompileTwoRegistersToNone(arm64.CMPW, tmpReg, index.register) // If the value exceeds the length, we will branch into the default target (corresponding to len(o.Targets) index). @@ -878,7 +885,7 @@ func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { // the above example's offsetData would be [0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x8, 0x0, 0x0, 0x0]. // // Note: this is similar to how GCC implements Switch statements in C. 
- offsetData := asm.NewStaticConst(make([]byte, 4*(len(o.Targets)+1))) + offsetData := asm.NewStaticConst(make([]byte, 4*(len(o.Us)))) // "tmpReg = &offsetData[0]" c.assembler.CompileStaticConstToRegister(arm64.ADR, offsetData, tmpReg) @@ -903,7 +910,7 @@ func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { c.markRegisterUnused(index.register) // [Emit the code for each targets and default branch] - labelInitialInstructions := make([]asm.Node, len(o.Targets)+1) + labelInitialInstructions := make([]asm.Node, len(o.Us)) saved := c.locationStack for i := range labelInitialInstructions { // Emit the initial instruction of each target where @@ -912,23 +919,30 @@ func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { labelInitialInstructions[i] = init var locationStack runtimeValueLocationStack - var target *wazeroir.BranchTargetDrop - if i < len(o.Targets) { - target = o.Targets[i] + var targetToDrop *wazeroir.InclusiveRange + var targetLabel wazeroir.Label + if i < len(o.Us)-1 { + targetLabel = wazeroir.Label(o.Us[i+1]) + if len(o.Rs) > i+1 { + targetToDrop = o.Rs[i+1] + } // Clone the location stack so the branch-specific code doesn't // affect others. locationStack = saved.clone() } else { - target = o.Default + targetLabel = wazeroir.Label(o.Us[0]) + if len(o.Rs) > 0 { + targetToDrop = o.Rs[0] + } // If this is the default branch, we use the original one // as this is the last code in this block. locationStack = saved } c.setLocationStack(locationStack) - if err := compileDropRange(c, target.ToDrop); err != nil { + if err := compileDropRange(c, targetToDrop); err != nil { return err } - if err := c.compileBranchInto(target.Target); err != nil { + if err := c.compileBranchInto(targetLabel); err != nil { return err } } @@ -938,7 +952,7 @@ func (c *arm64Compiler) compileBrTable(o wazeroir.OperationBrTable) error { } // compileCall implements compiler.compileCall for the arm64 architecture. 
-func (c *arm64Compiler) compileCall(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileCall(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -1083,7 +1097,7 @@ func (c *arm64Compiler) compileCallImpl(targetFunctionAddressRegister asm.Regist } // compileCallIndirect implements compiler.compileCallIndirect for the arm64 architecture. -func (c *arm64Compiler) compileCallIndirect(o wazeroir.UnionOperation) (err error) { +func (c *arm64Compiler) compileCallIndirect(o *wazeroir.UnionOperation) (err error) { offset := c.locationStack.pop() if err = c.compileEnsureOnRegister(offset); err != nil { return err @@ -1196,8 +1210,8 @@ func (c *arm64Compiler) compileCallIndirect(o wazeroir.UnionOperation) (err erro } // compileDrop implements compiler.compileDrop for the arm64 architecture. -func (c *arm64Compiler) compileDrop(o wazeroir.OperationDrop) error { - return compileDropRange(c, o.Depth) +func (c *arm64Compiler) compileDrop(o *wazeroir.UnionOperation) error { + return compileDropRange(c, o.Rs[0]) } func (c *arm64Compiler) compileSelectV128Impl(selectorRegister asm.Register) error { @@ -1229,7 +1243,7 @@ func (c *arm64Compiler) compileSelectV128Impl(selectorRegister asm.Register) err } // compileSelect implements compiler.compileSelect for the arm64 architecture. -func (c *arm64Compiler) compileSelect(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileSelect(o *wazeroir.UnionOperation) error { cv, err := c.popValueOnRegister() if err != nil { return err @@ -1306,7 +1320,7 @@ func (c *arm64Compiler) compileSelect(o wazeroir.UnionOperation) error { } // compilePick implements compiler.compilePick for the arm64 architecture. 
-func (c *arm64Compiler) compilePick(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compilePick(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -1358,7 +1372,7 @@ func (c *arm64Compiler) compilePick(o wazeroir.UnionOperation) error { } // compileAdd implements compiler.compileAdd for the arm64 architecture. -func (c *arm64Compiler) compileAdd(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileAdd(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1393,7 +1407,7 @@ func (c *arm64Compiler) compileAdd(o wazeroir.UnionOperation) error { } // compileSub implements compiler.compileSub for the arm64 architecture. -func (c *arm64Compiler) compileSub(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileSub(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1436,7 +1450,7 @@ func (c *arm64Compiler) compileSub(o wazeroir.UnionOperation) error { } // compileMul implements compiler.compileMul for the arm64 architecture. -func (c *arm64Compiler) compileMul(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileMul(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1473,7 +1487,7 @@ func (c *arm64Compiler) compileMul(o wazeroir.UnionOperation) error { } // compileClz implements compiler.compileClz for the arm64 architecture. -func (c *arm64Compiler) compileClz(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileClz(o *wazeroir.UnionOperation) error { v, err := c.popValueOnRegister() if err != nil { return err @@ -1513,7 +1527,7 @@ func (c *arm64Compiler) compileClz(o wazeroir.UnionOperation) error { } // compileCtz implements compiler.compileCtz for the arm64 architecture. 
-func (c *arm64Compiler) compileCtz(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileCtz(o *wazeroir.UnionOperation) error { v, err := c.popValueOnRegister() if err != nil { return err @@ -1558,7 +1572,7 @@ func (c *arm64Compiler) compileCtz(o wazeroir.UnionOperation) error { } // compilePopcnt implements compiler.compilePopcnt for the arm64 architecture. -func (c *arm64Compiler) compilePopcnt(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compilePopcnt(o *wazeroir.UnionOperation) error { v, err := c.popValueOnRegister() if err != nil { return err @@ -1605,7 +1619,7 @@ func (c *arm64Compiler) compilePopcnt(o wazeroir.UnionOperation) error { } // compileDiv implements compiler.compileDiv for the arm64 architecture. -func (c *arm64Compiler) compileDiv(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileDiv(o *wazeroir.UnionOperation) error { dividend, divisor, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1728,7 +1742,7 @@ func (c *arm64Compiler) compileIntegerDivPrecheck(is32Bit, isSigned bool, divide } // compileRem implements compiler.compileRem for the arm64 architecture. -func (c *arm64Compiler) compileRem(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileRem(o *wazeroir.UnionOperation) error { dividend, divisor, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1802,7 +1816,7 @@ func (c *arm64Compiler) compileRem(o wazeroir.UnionOperation) error { } // compileAnd implements compiler.compileAnd for the arm64 architecture. -func (c *arm64Compiler) compileAnd(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileAnd(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1837,7 +1851,7 @@ func (c *arm64Compiler) compileAnd(o wazeroir.UnionOperation) error { } // compileOr implements compiler.compileOr for the arm64 architecture. 
-func (c *arm64Compiler) compileOr(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileOr(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1867,7 +1881,7 @@ func (c *arm64Compiler) compileOr(o wazeroir.UnionOperation) error { } // compileXor implements compiler.compileXor for the arm64 architecture. -func (c *arm64Compiler) compileXor(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileXor(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1895,7 +1909,7 @@ func (c *arm64Compiler) compileXor(o wazeroir.UnionOperation) error { } // compileShl implements compiler.compileShl for the arm64 architecture. -func (c *arm64Compiler) compileShl(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileShl(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1921,7 +1935,7 @@ func (c *arm64Compiler) compileShl(o wazeroir.UnionOperation) error { } // compileShr implements compiler.compileShr for the arm64 architecture. -func (c *arm64Compiler) compileShr(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileShr(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1951,7 +1965,7 @@ func (c *arm64Compiler) compileShr(o wazeroir.UnionOperation) error { } // compileRotl implements compiler.compileRotl for the arm64 architecture. -func (c *arm64Compiler) compileRotl(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileRotl(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -1983,7 +1997,7 @@ func (c *arm64Compiler) compileRotl(o wazeroir.UnionOperation) error { } // compileRotr implements compiler.compileRotr for the arm64 architecture. 
-func (c *arm64Compiler) compileRotr(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileRotr(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2009,7 +2023,7 @@ func (c *arm64Compiler) compileRotr(o wazeroir.UnionOperation) error { } // compileAbs implements compiler.compileAbs for the arm64 architecture. -func (c *arm64Compiler) compileAbs(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileAbs(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FABSS, runtimeValueTypeF32) } else { @@ -2018,7 +2032,7 @@ func (c *arm64Compiler) compileAbs(o wazeroir.UnionOperation) error { } // compileNeg implements compiler.compileNeg for the arm64 architecture. -func (c *arm64Compiler) compileNeg(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileNeg(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FNEGS, runtimeValueTypeF32) } else { @@ -2027,7 +2041,7 @@ func (c *arm64Compiler) compileNeg(o wazeroir.UnionOperation) error { } // compileCeil implements compiler.compileCeil for the arm64 architecture. -func (c *arm64Compiler) compileCeil(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileCeil(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FRINTPS, runtimeValueTypeF32) } else { @@ -2036,7 +2050,7 @@ func (c *arm64Compiler) compileCeil(o wazeroir.UnionOperation) error { } // compileFloor implements compiler.compileFloor for the arm64 architecture. 
-func (c *arm64Compiler) compileFloor(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileFloor(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FRINTMS, runtimeValueTypeF32) } else { @@ -2045,7 +2059,7 @@ func (c *arm64Compiler) compileFloor(o wazeroir.UnionOperation) error { } // compileTrunc implements compiler.compileTrunc for the arm64 architecture. -func (c *arm64Compiler) compileTrunc(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileTrunc(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FRINTZS, runtimeValueTypeF32) } else { @@ -2054,7 +2068,7 @@ func (c *arm64Compiler) compileTrunc(o wazeroir.UnionOperation) error { } // compileNearest implements compiler.compileNearest for the arm64 architecture. -func (c *arm64Compiler) compileNearest(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileNearest(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FRINTNS, runtimeValueTypeF32) } else { @@ -2063,7 +2077,7 @@ func (c *arm64Compiler) compileNearest(o wazeroir.UnionOperation) error { } // compileSqrt implements compiler.compileSqrt for the arm64 architecture. -func (c *arm64Compiler) compileSqrt(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileSqrt(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleUnop(arm64.FSQRTS, runtimeValueTypeF32) } else { @@ -2072,7 +2086,7 @@ func (c *arm64Compiler) compileSqrt(o wazeroir.UnionOperation) error { } // compileMin implements compiler.compileMin for the arm64 architecture. 
-func (c *arm64Compiler) compileMin(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileMin(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleFloatBinop(arm64.FMINS) } else { @@ -2081,7 +2095,7 @@ func (c *arm64Compiler) compileMin(o wazeroir.UnionOperation) error { } // compileMax implements compiler.compileMax for the arm64 architecture. -func (c *arm64Compiler) compileMax(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileMax(o *wazeroir.UnionOperation) error { if wazeroir.Float(o.B1) == wazeroir.Float32 { return c.compileSimpleFloatBinop(arm64.FMAXS) } else { @@ -2100,7 +2114,7 @@ func (c *arm64Compiler) compileSimpleFloatBinop(inst asm.Instruction) error { } // compileCopysign implements compiler.compileCopysign for the arm64 architecture. -func (c *arm64Compiler) compileCopysign(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileCopysign(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2154,35 +2168,39 @@ func (c *arm64Compiler) compileI32WrapFromI64() error { } // compileITruncFromF implements compiler.compileITruncFromF for the arm64 architecture. -func (c *arm64Compiler) compileITruncFromF(o wazeroir.OperationITruncFromF) error { +func (c *arm64Compiler) compileITruncFromF(o *wazeroir.UnionOperation) error { // Clear the floating point status register (FPSR). 
c.assembler.CompileRegisterToRegister(arm64.MSR, arm64.RegRZR, arm64.RegFPSR) var vt runtimeValueType var convinst asm.Instruction - is32bitFloat := o.InputType == wazeroir.Float32 - if is32bitFloat && o.OutputType == wazeroir.SignedInt32 { + inputType := wazeroir.Float(o.B1) + outputType := wazeroir.SignedInt(o.B2) + nonTrapping := o.B3 + + is32bitFloat := inputType == wazeroir.Float32 + if is32bitFloat && outputType == wazeroir.SignedInt32 { convinst = arm64.FCVTZSSW vt = runtimeValueTypeI32 - } else if is32bitFloat && o.OutputType == wazeroir.SignedInt64 { + } else if is32bitFloat && outputType == wazeroir.SignedInt64 { convinst = arm64.FCVTZSS vt = runtimeValueTypeI64 - } else if !is32bitFloat && o.OutputType == wazeroir.SignedInt32 { + } else if !is32bitFloat && outputType == wazeroir.SignedInt32 { convinst = arm64.FCVTZSDW vt = runtimeValueTypeI32 - } else if !is32bitFloat && o.OutputType == wazeroir.SignedInt64 { + } else if !is32bitFloat && outputType == wazeroir.SignedInt64 { convinst = arm64.FCVTZSD vt = runtimeValueTypeI64 - } else if is32bitFloat && o.OutputType == wazeroir.SignedUint32 { + } else if is32bitFloat && outputType == wazeroir.SignedUint32 { convinst = arm64.FCVTZUSW vt = runtimeValueTypeI32 - } else if is32bitFloat && o.OutputType == wazeroir.SignedUint64 { + } else if is32bitFloat && outputType == wazeroir.SignedUint64 { convinst = arm64.FCVTZUS vt = runtimeValueTypeI64 - } else if !is32bitFloat && o.OutputType == wazeroir.SignedUint32 { + } else if !is32bitFloat && outputType == wazeroir.SignedUint32 { convinst = arm64.FCVTZUDW vt = runtimeValueTypeI32 - } else if !is32bitFloat && o.OutputType == wazeroir.SignedUint64 { + } else if !is32bitFloat && outputType == wazeroir.SignedUint64 { convinst = arm64.FCVTZUD vt = runtimeValueTypeI64 } @@ -2201,7 +2219,7 @@ func (c *arm64Compiler) compileITruncFromF(o wazeroir.OperationITruncFromF) erro c.assembler.CompileRegisterToRegister(convinst, sourceReg, destinationReg) 
c.pushRuntimeValueLocationOnRegister(destinationReg, vt) - if !o.NonTrapping { + if !nonTrapping { // Obtain the floating point status register value into the general purpose register, // so that we can check if the conversion resulted in undefined behavior. c.assembler.CompileRegisterToRegister(arm64.MRS, arm64.RegFPSR, arm64ReservedRegisterForTemporary) @@ -2237,28 +2255,31 @@ func (c *arm64Compiler) compileITruncFromF(o wazeroir.OperationITruncFromF) erro } // compileFConvertFromI implements compiler.compileFConvertFromI for the arm64 architecture. -func (c *arm64Compiler) compileFConvertFromI(o wazeroir.OperationFConvertFromI) error { +func (c *arm64Compiler) compileFConvertFromI(o *wazeroir.UnionOperation) error { var convinst asm.Instruction - if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedInt32 { + inputType := wazeroir.SignedInt(o.B1) + outputType := wazeroir.Float(o.B2) + + if outputType == wazeroir.Float32 && inputType == wazeroir.SignedInt32 { convinst = arm64.SCVTFWS - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedInt64 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedInt64 { convinst = arm64.SCVTFS - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedInt32 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedInt32 { convinst = arm64.SCVTFWD - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedInt64 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedInt64 { convinst = arm64.SCVTFD - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedUint32 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedUint32 { convinst = arm64.UCVTFWS - } else if o.OutputType == wazeroir.Float32 && o.InputType == wazeroir.SignedUint64 { + } else if outputType == wazeroir.Float32 && inputType == wazeroir.SignedUint64 { convinst = arm64.UCVTFS - } else if 
o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedUint32 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedUint32 { convinst = arm64.UCVTFWD - } else if o.OutputType == wazeroir.Float64 && o.InputType == wazeroir.SignedUint64 { + } else if outputType == wazeroir.Float64 && inputType == wazeroir.SignedUint64 { convinst = arm64.UCVTFD } var vt runtimeValueType - if o.OutputType == wazeroir.Float32 { + if outputType == wazeroir.Float32 { vt = runtimeValueTypeF32 } else { vt = runtimeValueTypeF64 @@ -2333,8 +2354,9 @@ func (c *arm64Compiler) compileSimpleConversion(inst asm.Instruction, destinatio } // compileExtend implements compiler.compileExtend for the arm64 architecture. -func (c *arm64Compiler) compileExtend(o wazeroir.OperationExtend) error { - if o.Signed { +func (c *arm64Compiler) compileExtend(o *wazeroir.UnionOperation) error { + signed := o.B1 != 0 + if signed { return c.compileSimpleUnop(arm64.SXTW, runtimeValueTypeI64) } else { return c.compileSimpleUnop(arm64.MOVW, runtimeValueTypeI64) @@ -2378,12 +2400,12 @@ func (c *arm64Compiler) compileSimpleUnop(inst asm.Instruction, resultRuntimeVal } // compileEq implements compiler.compileEq for the arm64 architecture. -func (c *arm64Compiler) compileEq(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileEq(o *wazeroir.UnionOperation) error { return c.emitEqOrNe(true, wazeroir.UnsignedType(o.B1)) } // compileNe implements compiler.compileNe for the arm64 architecture. -func (c *arm64Compiler) compileNe(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileNe(o *wazeroir.UnionOperation) error { return c.emitEqOrNe(false, wazeroir.UnsignedType(o.B1)) } @@ -2418,7 +2440,7 @@ func (c *arm64Compiler) emitEqOrNe(isEq bool, unsignedType wazeroir.UnsignedType } // compileEqz implements compiler.compileEqz for the arm64 architecture. 
-func (c *arm64Compiler) compileEqz(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileEqz(o *wazeroir.UnionOperation) error { x1, err := c.popValueOnRegister() if err != nil { return err @@ -2441,7 +2463,7 @@ func (c *arm64Compiler) compileEqz(o wazeroir.UnionOperation) error { } // compileLt implements compiler.compileLt for the arm64 architecture. -func (c *arm64Compiler) compileLt(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLt(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2479,7 +2501,7 @@ func (c *arm64Compiler) compileLt(o wazeroir.UnionOperation) error { } // compileGt implements compiler.compileGt for the arm64 architecture. -func (c *arm64Compiler) compileGt(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileGt(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2517,7 +2539,7 @@ func (c *arm64Compiler) compileGt(o wazeroir.UnionOperation) error { } // compileLe implements compiler.compileLe for the arm64 architecture. -func (c *arm64Compiler) compileLe(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLe(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2555,7 +2577,7 @@ func (c *arm64Compiler) compileLe(o wazeroir.UnionOperation) error { } // compileGe implements compiler.compileGe for the arm64 architecture. -func (c *arm64Compiler) compileGe(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileGe(o *wazeroir.UnionOperation) error { x1, x2, err := c.popTwoValuesOnRegisters() if err != nil { return err @@ -2593,7 +2615,7 @@ func (c *arm64Compiler) compileGe(o wazeroir.UnionOperation) error { } // compileLoad implements compiler.compileLoad for the arm64 architecture. 
-func (c *arm64Compiler) compileLoad(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLoad(o *wazeroir.UnionOperation) error { var ( isFloat bool loadInst asm.Instruction @@ -2628,7 +2650,7 @@ func (c *arm64Compiler) compileLoad(o wazeroir.UnionOperation) error { } // compileLoad8 implements compiler.compileLoad8 for the arm64 architecture. -func (c *arm64Compiler) compileLoad8(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLoad8(o *wazeroir.UnionOperation) error { var loadInst asm.Instruction var vt runtimeValueType @@ -2653,7 +2675,7 @@ func (c *arm64Compiler) compileLoad8(o wazeroir.UnionOperation) error { } // compileLoad16 implements compiler.compileLoad16 for the arm64 architecture. -func (c *arm64Compiler) compileLoad16(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLoad16(o *wazeroir.UnionOperation) error { var loadInst asm.Instruction var vt runtimeValueType @@ -2678,7 +2700,7 @@ func (c *arm64Compiler) compileLoad16(o wazeroir.UnionOperation) error { } // compileLoad32 implements compiler.compileLoad32 for the arm64 architecture. -func (c *arm64Compiler) compileLoad32(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileLoad32(o *wazeroir.UnionOperation) error { var loadInst asm.Instruction signed := o.B1 == 1 offset := uint32(o.U2) @@ -2721,7 +2743,7 @@ func (c *arm64Compiler) compileLoadImpl(offsetArg uint32, loadInst asm.Instructi } // compileStore implements compiler.compileStore for the arm64 architecture. -func (c *arm64Compiler) compileStore(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileStore(o *wazeroir.UnionOperation) error { var movInst asm.Instruction var targetSizeInBytes int64 unsignedType := wazeroir.UnsignedType(o.B1) @@ -2744,17 +2766,17 @@ func (c *arm64Compiler) compileStore(o wazeroir.UnionOperation) error { } // compileStore8 implements compiler.compileStore8 for the arm64 architecture. 
-func (c *arm64Compiler) compileStore8(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileStore8(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), arm64.STRB, 1) } // compileStore16 implements compiler.compileStore16 for the arm64 architecture. -func (c *arm64Compiler) compileStore16(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileStore16(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), arm64.STRH, 16/8) } // compileStore32 implements compiler.compileStore32 for the arm64 architecture. -func (c *arm64Compiler) compileStore32(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileStore32(o *wazeroir.UnionOperation) error { return c.compileStoreImpl(uint32(o.U2), arm64.STRW, 32/8) } @@ -2909,12 +2931,12 @@ func (c *arm64Compiler) compileCallGoFunction(compilerStatus nativeCallStatusCod } // compileConstI32 implements compiler.compileConstI32 for the arm64 architecture. -func (c *arm64Compiler) compileConstI32(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileConstI32(o *wazeroir.UnionOperation) error { return c.compileIntConstant(true, o.U1) } // compileConstI64 implements compiler.compileConstI64 for the arm64 architecture. -func (c *arm64Compiler) compileConstI64(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileConstI64(o *wazeroir.UnionOperation) error { return c.compileIntConstant(false, o.U1) } @@ -2953,12 +2975,12 @@ func (c *arm64Compiler) compileIntConstant(is32bit bool, value uint64) error { } // compileConstF32 implements compiler.compileConstF32 for the arm64 architecture. -func (c *arm64Compiler) compileConstF32(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileConstF32(o *wazeroir.UnionOperation) error { return c.compileFloatConstant(true, o.U1 /*uint64(math.Float32bits(o.Value))*/) } // compileConstF64 implements compiler.compileConstF64 for the arm64 architecture. 
-func (c *arm64Compiler) compileConstF64(o wazeroir.UnionOperation) error { +func (c *arm64Compiler) compileConstF64(o *wazeroir.UnionOperation) error { return c.compileFloatConstant(false, o.U1 /*math.Float64bits(o.Value)*/) } @@ -3005,8 +3027,9 @@ func (c *arm64Compiler) compileFloatConstant(is32bit bool, value uint64) error { } // compileMemoryInit implements compiler.compileMemoryInit for the arm64 architecture. -func (c *arm64Compiler) compileMemoryInit(o wazeroir.OperationMemoryInit) error { - return c.compileInitImpl(false, o.DataIndex, 0) +func (c *arm64Compiler) compileMemoryInit(o *wazeroir.UnionOperation) error { + dataIndex := uint32(o.U1) + return c.compileInitImpl(false, dataIndex, 0) } // compileInitImpl implements compileTableInit and compileMemoryInit. @@ -3191,7 +3214,7 @@ func (c *arm64Compiler) compileInitImpl(isTable bool, index, tableIndex uint32) } // compileDataDrop implements compiler.compileDataDrop for the arm64 architecture. -func (c *arm64Compiler) compileDataDrop(o wazeroir.OperationDataDrop) error { +func (c *arm64Compiler) compileDataDrop(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -3201,7 +3224,8 @@ func (c *arm64Compiler) compileDataDrop(o wazeroir.OperationDataDrop) error { return err } - c.compileLoadDataInstanceAddress(o.DataIndex, tmp) + dataIndex := uint32(o.U1) + c.compileLoadDataInstanceAddress(dataIndex, tmp) // Clears the content of DataInstance[o.DataIndex] (== []byte type). c.assembler.CompileRegisterToMemory(arm64.STRD, arm64.RegRZR, tmp, 0) @@ -3629,17 +3653,19 @@ func (c *arm64Compiler) compileFillImpl(isTable bool, tableIndex uint32) error { } // compileTableInit implements compiler.compileTableInit for the arm64 architecture. 
-func (c *arm64Compiler) compileTableInit(o wazeroir.OperationTableInit) error { - return c.compileInitImpl(true, o.ElemIndex, o.TableIndex) +func (c *arm64Compiler) compileTableInit(o *wazeroir.UnionOperation) error { + elemIndex := uint32(o.U1) + tableIndex := uint32(o.U2) + return c.compileInitImpl(true, elemIndex, tableIndex) } // compileTableCopy implements compiler.compileTableCopy for the arm64 architecture. -func (c *arm64Compiler) compileTableCopy(o wazeroir.OperationTableCopy) error { - return c.compileCopyImpl(true, o.SrcTableIndex, o.DstTableIndex) +func (c *arm64Compiler) compileTableCopy(o *wazeroir.UnionOperation) error { + return c.compileCopyImpl(true, uint32(o.U1), uint32(o.U2)) } // compileElemDrop implements compiler.compileElemDrop for the arm64 architecture. -func (c *arm64Compiler) compileElemDrop(o wazeroir.OperationElemDrop) error { +func (c *arm64Compiler) compileElemDrop(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -3649,7 +3675,8 @@ func (c *arm64Compiler) compileElemDrop(o wazeroir.OperationElemDrop) error { return err } - c.compileLoadElemInstanceAddress(o.ElemIndex, tmp) + elemIndex := uint32(o.U1) + c.compileLoadElemInstanceAddress(elemIndex, tmp) // Clears the content of ElementInstances[o.ElemIndex] (== []interface{} type). c.assembler.CompileRegisterToMemory(arm64.STRD, arm64.RegRZR, tmp, 0) @@ -3675,7 +3702,7 @@ func (c *arm64Compiler) compileLoadElemInstanceAddress(elemIndex uint32, dst asm } // compileRefFunc implements compiler.compileRefFunc for the arm64 architecture. 
-func (c *arm64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { +func (c *arm64Compiler) compileRefFunc(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -3692,8 +3719,9 @@ func (c *arm64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { // ref = ref + int64(o.FunctionIndex)*sizeOf(function) // = &moduleEngine.functions[index] + functionIndex := int64(o.U1) c.assembler.CompileConstToRegister(arm64.ADD, - int64(o.FunctionIndex)*functionSize, + functionIndex*functionSize, ref, ) @@ -3702,7 +3730,7 @@ func (c *arm64Compiler) compileRefFunc(o wazeroir.OperationRefFunc) error { } // compileTableGet implements compiler.compileTableGet for the arm64 architecture. -func (c *arm64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { +func (c *arm64Compiler) compileTableGet(o *wazeroir.UnionOperation) error { ref, err := c.allocateRegister(registerTypeGeneralPurpose) if err != nil { return err @@ -3721,8 +3749,9 @@ func (c *arm64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { // arm64ReservedRegisterForTemporary = [arm64ReservedRegisterForTemporary + TableIndex*8] // = [&tables[0] + TableIndex*sizeOf(*tableInstance)] // = [&tables[TableIndex]] = tables[TableIndex]. + tableIndex := int64(o.U1) c.assembler.CompileMemoryToRegister(arm64.LDRD, - arm64ReservedRegisterForTemporary, int64(o.TableIndex)*8, + arm64ReservedRegisterForTemporary, tableIndex*8, arm64ReservedRegisterForTemporary) // Out of bounds check. @@ -3759,7 +3788,7 @@ func (c *arm64Compiler) compileTableGet(o wazeroir.OperationTableGet) error { } // compileTableSet implements compiler.compileTableSet for the arm64 architecture. 
-func (c *arm64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { +func (c *arm64Compiler) compileTableSet(o *wazeroir.UnionOperation) error { ref := c.locationStack.pop() if err := c.compileEnsureOnRegister(ref); err != nil { return err @@ -3782,8 +3811,9 @@ func (c *arm64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { // arm64ReservedRegisterForTemporary = arm64ReservedRegisterForTemporary + TableIndex*8 // = &tables[0] + TableIndex*sizeOf(*tableInstance) // = &tables[TableIndex] + tableIndex := int64(o.U1) c.assembler.CompileMemoryToRegister(arm64.LDRD, - arm64ReservedRegisterForTemporary, int64(o.TableIndex)*8, + arm64ReservedRegisterForTemporary, tableIndex*8, arm64ReservedRegisterForTemporary) // Out of bounds check. @@ -3819,13 +3849,14 @@ func (c *arm64Compiler) compileTableSet(o wazeroir.OperationTableSet) error { } // compileTableGrow implements compiler.compileTableGrow for the arm64 architecture. -func (c *arm64Compiler) compileTableGrow(o wazeroir.OperationTableGrow) error { +func (c *arm64Compiler) compileTableGrow(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } // Pushes the table index. - if err := c.compileConstI32(wazeroir.NewOperationConstI32(o.TableIndex)); err != nil { + tableIndex := o.U1 + if err := c.compileIntConstant(true, tableIndex); err != nil { return err } @@ -3851,7 +3882,7 @@ func (c *arm64Compiler) compileTableGrow(o wazeroir.OperationTableGrow) error { } // compileTableSize implements compiler.compileTableSize for the arm64 architecture. 
-func (c *arm64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { +func (c *arm64Compiler) compileTableSize(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -3868,8 +3899,9 @@ func (c *arm64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { // arm64ReservedRegisterForTemporary = [arm64ReservedRegisterForTemporary + TableIndex*8] // = [&tables[0] + TableIndex*sizeOf(*tableInstance)] // = [&tables[TableIndex]] = tables[TableIndex]. + tableIndex := int64(o.U1) c.assembler.CompileMemoryToRegister(arm64.LDRD, - arm64ReservedRegisterForTemporary, int64(o.TableIndex)*8, + arm64ReservedRegisterForTemporary, tableIndex*8, arm64ReservedRegisterForTemporary) // result = [&tables[TableIndex] + tableInstanceTableLenOffset] = len(tables[TableIndex]) @@ -3883,8 +3915,9 @@ func (c *arm64Compiler) compileTableSize(o wazeroir.OperationTableSize) error { } // compileTableFill implements compiler.compileTableFill for the arm64 architecture. -func (c *arm64Compiler) compileTableFill(o wazeroir.OperationTableFill) error { - return c.compileFillImpl(true, o.TableIndex) +func (c *arm64Compiler) compileTableFill(o *wazeroir.UnionOperation) error { + tableIndex := uint32(o.U1) + return c.compileFillImpl(true, tableIndex) } // popTwoValuesOnRegisters pops two values from the location stacks, ensures diff --git a/internal/engine/compiler/impl_arm64_test.go b/internal/engine/compiler/impl_arm64_test.go index 72a6fe2b..ab1f27da 100644 --- a/internal/engine/compiler/impl_arm64_test.go +++ b/internal/engine/compiler/impl_arm64_test.go @@ -19,7 +19,7 @@ func TestArm64Compiler_indirectCallWithTargetOnCallingConvReg(t *testing.T) { env.addTable(&wasm.TableInstance{References: table}) // Ensure that the module instance has the type information for targetOperation.TypeIndex, // and the typeID matches the table[targetOffset]'s type ID. 
- operation := wazeroir.NewOperationCallIndirect(0, 0) + operation := operationPtr(wazeroir.NewOperationCallIndirect(0, 0)) env.module().TypeIDs = []wasm.FunctionTypeID{0} env.module().Engine = &moduleEngine{functions: []function{}} diff --git a/internal/engine/compiler/impl_vec_amd64.go b/internal/engine/compiler/impl_vec_amd64.go index 74d59c7e..d6c111db 100644 --- a/internal/engine/compiler/impl_vec_amd64.go +++ b/internal/engine/compiler/impl_vec_amd64.go @@ -9,11 +9,13 @@ import ( ) // compileV128Const implements compiler.compileV128Const for amd64 architecture. -func (c *amd64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { +func (c *amd64Compiler) compileV128Const(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } + lo, hi := o.U1, o.U2 + result, err := c.allocateRegister(registerTypeVector) if err != nil { return err @@ -27,17 +29,17 @@ func (c *amd64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { } // Move the lower 64-bits. - if o.Lo == 0 { + if lo == 0 { c.assembler.CompileRegisterToRegister(amd64.XORQ, tmpReg, tmpReg) } else { - c.assembler.CompileConstToRegister(amd64.MOVQ, int64(o.Lo), tmpReg) + c.assembler.CompileConstToRegister(amd64.MOVQ, int64(lo), tmpReg) } c.assembler.CompileRegisterToRegister(amd64.MOVQ, tmpReg, result) - if o.Lo != 0 && o.Hi == 0 { + if lo != 0 && hi == 0 { c.assembler.CompileRegisterToRegister(amd64.XORQ, tmpReg, tmpReg) - } else if o.Hi != 0 { - c.assembler.CompileConstToRegister(amd64.MOVQ, int64(o.Hi), tmpReg) + } else if hi != 0 { + c.assembler.CompileConstToRegister(amd64.MOVQ, int64(hi), tmpReg) } // Move the higher 64-bits with PINSRQ at the second element of 64x2 vector. 
c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRQ, tmpReg, result, 1) @@ -47,7 +49,7 @@ func (c *amd64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { } // compileV128Add implements compiler.compileV128Add for amd64 architecture. -func (c *amd64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { +func (c *amd64Compiler) compileV128Add(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -58,7 +60,8 @@ func (c *amd64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { return err } var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: inst = amd64.PADDB case wazeroir.ShapeI16x8: @@ -80,7 +83,7 @@ func (c *amd64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { } // compileV128Sub implements compiler.compileV128Sub for amd64 architecture. -func (c *amd64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) error { +func (c *amd64Compiler) compileV128Sub(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -91,7 +94,8 @@ func (c *amd64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) error { return err } var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: inst = amd64.PSUBB case wazeroir.ShapeI16x8: @@ -113,29 +117,32 @@ func (c *amd64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) error { } // compileV128Load implements compiler.compileV128Load for amd64 architecture. 
-func (c *amd64Compiler) compileV128Load(o wazeroir.OperationV128Load) error { +func (c *amd64Compiler) compileV128Load(o *wazeroir.UnionOperation) error { result, err := c.allocateRegister(registerTypeVector) if err != nil { return err } - switch o.Type { + offset := uint32(o.U2) + loadType := wazeroir.V128LoadType(o.B1) + + switch loadType { case wazeroir.V128LoadType128: - err = c.compileV128LoadImpl(amd64.MOVDQU, o.Arg.Offset, 16, result) + err = c.compileV128LoadImpl(amd64.MOVDQU, offset, 16, result) case wazeroir.V128LoadType8x8s: - err = c.compileV128LoadImpl(amd64.PMOVSXBW, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVSXBW, offset, 8, result) case wazeroir.V128LoadType8x8u: - err = c.compileV128LoadImpl(amd64.PMOVZXBW, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVZXBW, offset, 8, result) case wazeroir.V128LoadType16x4s: - err = c.compileV128LoadImpl(amd64.PMOVSXWD, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVSXWD, offset, 8, result) case wazeroir.V128LoadType16x4u: - err = c.compileV128LoadImpl(amd64.PMOVZXWD, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVZXWD, offset, 8, result) case wazeroir.V128LoadType32x2s: - err = c.compileV128LoadImpl(amd64.PMOVSXDQ, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVSXDQ, offset, 8, result) case wazeroir.V128LoadType32x2u: - err = c.compileV128LoadImpl(amd64.PMOVZXDQ, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.PMOVZXDQ, offset, 8, result) case wazeroir.V128LoadType8Splat: - reg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, 1) + reg, err := c.compileMemoryAccessCeilSetup(offset, 1) if err != nil { return err } @@ -153,7 +160,7 @@ func (c *amd64Compiler) compileV128Load(o wazeroir.OperationV128Load) error { c.assembler.CompileRegisterToRegister(amd64.PXOR, tmpVReg, tmpVReg) c.assembler.CompileRegisterToRegister(amd64.PSHUFB, tmpVReg, result) case wazeroir.V128LoadType16Splat: - reg, err := 
c.compileMemoryAccessCeilSetup(o.Arg.Offset, 2) + reg, err := c.compileMemoryAccessCeilSetup(offset, 2) if err != nil { return err } @@ -166,7 +173,7 @@ func (c *amd64Compiler) compileV128Load(o wazeroir.OperationV128Load) error { c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRW, reg, result, 1) c.assembler.CompileRegisterToRegisterWithArg(amd64.PSHUFD, result, result, 0) case wazeroir.V128LoadType32Splat: - reg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, 4) + reg, err := c.compileMemoryAccessCeilSetup(offset, 4) if err != nil { return err } @@ -177,7 +184,7 @@ func (c *amd64Compiler) compileV128Load(o wazeroir.OperationV128Load) error { c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRD, reg, result, 0) c.assembler.CompileRegisterToRegisterWithArg(amd64.PSHUFD, result, result, 0) case wazeroir.V128LoadType64Splat: - reg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, 8) + reg, err := c.compileMemoryAccessCeilSetup(offset, 8) if err != nil { return err } @@ -188,9 +195,9 @@ func (c *amd64Compiler) compileV128Load(o wazeroir.OperationV128Load) error { c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRQ, reg, result, 0) c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRQ, reg, result, 1) case wazeroir.V128LoadType32zero: - err = c.compileV128LoadImpl(amd64.MOVL, o.Arg.Offset, 4, result) + err = c.compileV128LoadImpl(amd64.MOVL, offset, 4, result) case wazeroir.V128LoadType64zero: - err = c.compileV128LoadImpl(amd64.MOVQ, o.Arg.Offset, 8, result) + err = c.compileV128LoadImpl(amd64.MOVQ, offset, 8, result) } if err != nil { @@ -212,14 +219,17 @@ func (c *amd64Compiler) compileV128LoadImpl(inst asm.Instruction, offset uint32, } // compileV128LoadLane implements compiler.compileV128LoadLane for amd64. 
-func (c *amd64Compiler) compileV128LoadLane(o wazeroir.OperationV128LoadLane) error { +func (c *amd64Compiler) compileV128LoadLane(o *wazeroir.UnionOperation) error { targetVector := c.locationStack.popV128() if err := c.compileEnsureOnRegister(targetVector); err != nil { return err } + laneSize, laneIndex := o.B1, o.B2 + offset := uint32(o.U2) + var insertInst asm.Instruction - switch o.LaneSize { + switch laneSize { case 8: insertInst = amd64.PINSRB case 16: @@ -230,27 +240,28 @@ func (c *amd64Compiler) compileV128LoadLane(o wazeroir.OperationV128LoadLane) er insertInst = amd64.PINSRQ } - targetSizeInBytes := int64(o.LaneSize / 8) - offsetReg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, targetSizeInBytes) + targetSizeInBytes := int64(laneSize / 8) + offsetReg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) if err != nil { return err } c.assembler.CompileMemoryWithIndexAndArgToRegister(insertInst, amd64ReservedRegisterForMemory, -targetSizeInBytes, - offsetReg, 1, targetVector.register, o.LaneIndex) + offsetReg, 1, targetVector.register, laneIndex) c.pushVectorRuntimeValueLocationOnRegister(targetVector.register) return nil } // compileV128Store implements compiler.compileV128Store for amd64. -func (c *amd64Compiler) compileV128Store(o wazeroir.OperationV128Store) error { +func (c *amd64Compiler) compileV128Store(o *wazeroir.UnionOperation) error { val := c.locationStack.popV128() if err := c.compileEnsureOnRegister(val); err != nil { return err } const targetSizeInBytes = 16 - offsetReg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, targetSizeInBytes) + offset := uint32(o.U2) + offsetReg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) if err != nil { return err } @@ -263,9 +274,12 @@ func (c *amd64Compiler) compileV128Store(o wazeroir.OperationV128Store) error { } // compileV128StoreLane implements compiler.compileV128StoreLane for amd64. 
-func (c *amd64Compiler) compileV128StoreLane(o wazeroir.OperationV128StoreLane) error { +func (c *amd64Compiler) compileV128StoreLane(o *wazeroir.UnionOperation) error { var storeInst asm.Instruction - switch o.LaneSize { + laneSize := o.B1 + laneIndex := o.B2 + offset := uint32(o.U2) + switch laneSize { case 8: storeInst = amd64.PEXTRB case 16: @@ -281,34 +295,37 @@ func (c *amd64Compiler) compileV128StoreLane(o wazeroir.OperationV128StoreLane) return err } - targetSizeInBytes := int64(o.LaneSize / 8) - offsetReg, err := c.compileMemoryAccessCeilSetup(o.Arg.Offset, targetSizeInBytes) + targetSizeInBytes := int64(laneSize / 8) + offsetReg, err := c.compileMemoryAccessCeilSetup(offset, targetSizeInBytes) if err != nil { return err } c.assembler.CompileRegisterToMemoryWithIndexAndArg(storeInst, val.register, - amd64ReservedRegisterForMemory, -targetSizeInBytes, offsetReg, 1, o.LaneIndex) + amd64ReservedRegisterForMemory, -targetSizeInBytes, offsetReg, 1, laneIndex) c.locationStack.markRegisterUnused(val.register, offsetReg) return nil } // compileV128ExtractLane implements compiler.compileV128ExtractLane for amd64. 
-func (c *amd64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLane) error { +func (c *amd64Compiler) compileV128ExtractLane(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err } vreg := v.register - switch o.Shape { + shape := o.B1 + laneIndex := o.B2 + signed := o.B3 + switch shape { case wazeroir.ShapeI8x16: result, err := c.allocateRegister(registerTypeGeneralPurpose) if err != nil { return err } - c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRB, vreg, result, o.LaneIndex) - if o.Signed { + c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRB, vreg, result, laneIndex) + if signed { c.assembler.CompileRegisterToRegister(amd64.MOVBLSX, result, result) } else { c.assembler.CompileRegisterToRegister(amd64.MOVBLZX, result, result) @@ -320,8 +337,8 @@ func (c *amd64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa if err != nil { return err } - c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRW, vreg, result, o.LaneIndex) - if o.Signed { + c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRW, vreg, result, laneIndex) + if signed { c.assembler.CompileRegisterToRegister(amd64.MOVWLSX, result, result) } else { c.assembler.CompileRegisterToRegister(amd64.MOVWLZX, result, result) @@ -333,7 +350,7 @@ func (c *amd64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa if err != nil { return err } - c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRD, vreg, result, o.LaneIndex) + c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRD, vreg, result, laneIndex) c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI32) c.locationStack.markRegisterUnused(vreg) case wazeroir.ShapeI64x2: @@ -341,16 +358,16 @@ func (c *amd64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa if err != nil { return err } - c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRQ, vreg, result, o.LaneIndex) + 
c.assembler.CompileRegisterToRegisterWithArg(amd64.PEXTRQ, vreg, result, laneIndex) c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI64) c.locationStack.markRegisterUnused(vreg) case wazeroir.ShapeF32x4: - if o.LaneIndex != 0 { - c.assembler.CompileRegisterToRegisterWithArg(amd64.PSHUFD, vreg, vreg, o.LaneIndex) + if laneIndex != 0 { + c.assembler.CompileRegisterToRegisterWithArg(amd64.PSHUFD, vreg, vreg, laneIndex) } c.pushRuntimeValueLocationOnRegister(vreg, runtimeValueTypeF32) case wazeroir.ShapeF64x2: - if o.LaneIndex != 0 { + if laneIndex != 0 { // This case we can assume LaneIndex == 1. // We have to modify the val.register as, for example: // 0b11 0b10 0b01 0b00 @@ -368,7 +385,7 @@ func (c *amd64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa } // compileV128ReplaceLane implements compiler.compileV128ReplaceLane for amd64. -func (c *amd64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLane) error { +func (c *amd64Compiler) compileV128ReplaceLane(o *wazeroir.UnionOperation) error { origin := c.locationStack.pop() if err := c.compileEnsureOnRegister(origin); err != nil { return err @@ -379,23 +396,25 @@ func (c *amd64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLa return err } - switch o.Shape { + shape := o.B1 + laneIndex := o.B2 + switch shape { case wazeroir.ShapeI8x16: - c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRB, origin.register, vector.register, o.LaneIndex) + c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRB, origin.register, vector.register, laneIndex) case wazeroir.ShapeI16x8: - c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRW, origin.register, vector.register, o.LaneIndex) + c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRW, origin.register, vector.register, laneIndex) case wazeroir.ShapeI32x4: - c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRD, origin.register, vector.register, o.LaneIndex) + 
c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRD, origin.register, vector.register, laneIndex) case wazeroir.ShapeI64x2: - c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRQ, origin.register, vector.register, o.LaneIndex) + c.assembler.CompileRegisterToRegisterWithArg(amd64.PINSRQ, origin.register, vector.register, laneIndex) case wazeroir.ShapeF32x4: c.assembler.CompileRegisterToRegisterWithArg(amd64.INSERTPS, origin.register, vector.register, // In INSERTPS instruction, the destination index is encoded at 4 and 5 bits of the argument. // See https://www.felixcloutier.com/x86/insertps - o.LaneIndex<<4, + laneIndex<<4, ) case wazeroir.ShapeF64x2: - if o.LaneIndex == 0 { + if laneIndex == 0 { c.assembler.CompileRegisterToRegister(amd64.MOVSD, origin.register, vector.register) } else { c.assembler.CompileRegisterToRegister(amd64.MOVLHPS, origin.register, vector.register) @@ -408,14 +427,15 @@ func (c *amd64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLa } // compileV128Splat implements compiler.compileV128Splat for amd64. -func (c *amd64Compiler) compileV128Splat(o wazeroir.OperationV128Splat) (err error) { +func (c *amd64Compiler) compileV128Splat(o *wazeroir.UnionOperation) (err error) { origin := c.locationStack.pop() if err = c.compileEnsureOnRegister(origin); err != nil { return } var result asm.Register - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: result, err = c.allocateRegister(registerTypeVector) if err != nil { @@ -471,7 +491,7 @@ func (c *amd64Compiler) compileV128Splat(o wazeroir.OperationV128Splat) (err err } // compileV128Shuffle implements compiler.compileV128Shuffle for amd64. 
-func (c *amd64Compiler) compileV128Shuffle(o wazeroir.OperationV128Shuffle) error { +func (c *amd64Compiler) compileV128Shuffle(o *wazeroir.UnionOperation) error { w := c.locationStack.popV128() if err := c.compileEnsureOnRegister(w); err != nil { return err @@ -490,7 +510,9 @@ func (c *amd64Compiler) compileV128Shuffle(o wazeroir.OperationV128Shuffle) erro } consts := [32]byte{} - for i, lane := range o.Lanes { + lanes := o.Us + for i, unsignedLane := range lanes { + lane := byte(unsignedLane) if lane < 16 { consts[i+16] = 0x80 consts[i] = lane @@ -523,7 +545,7 @@ var swizzleConst = [16]byte{ } // compileV128Swizzle implements compiler.compileV128Swizzle for amd64. -func (c *amd64Compiler) compileV128Swizzle(wazeroir.OperationV128Swizzle) error { +func (c *amd64Compiler) compileV128Swizzle(*wazeroir.UnionOperation) error { index := c.locationStack.popV128() if err := c.compileEnsureOnRegister(index); err != nil { return err @@ -555,7 +577,7 @@ func (c *amd64Compiler) compileV128Swizzle(wazeroir.OperationV128Swizzle) error } // compileV128AnyTrue implements compiler.compileV128AnyTrue for amd64. -func (c *amd64Compiler) compileV128AnyTrue(wazeroir.OperationV128AnyTrue) error { +func (c *amd64Compiler) compileV128AnyTrue(*wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -570,7 +592,7 @@ func (c *amd64Compiler) compileV128AnyTrue(wazeroir.OperationV128AnyTrue) error } // compileV128AllTrue implements compiler.compileV128AllTrue for amd64. 
-func (c *amd64Compiler) compileV128AllTrue(o wazeroir.OperationV128AllTrue) error { +func (c *amd64Compiler) compileV128AllTrue(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -582,7 +604,8 @@ func (c *amd64Compiler) compileV128AllTrue(o wazeroir.OperationV128AllTrue) erro } var cmpInst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: cmpInst = amd64.PCMPEQB case wazeroir.ShapeI16x8: @@ -602,7 +625,7 @@ func (c *amd64Compiler) compileV128AllTrue(o wazeroir.OperationV128AllTrue) erro } // compileV128BitMask implements compiler.compileV128BitMask for amd64. -func (c *amd64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) error { +func (c *amd64Compiler) compileV128BitMask(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -613,7 +636,8 @@ func (c *amd64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) erro return err } - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: c.assembler.CompileRegisterToRegister(amd64.PMOVMSKB, v.register, result) case wazeroir.ShapeI16x8: @@ -653,7 +677,7 @@ func (c *amd64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) erro } // compileV128And implements compiler.compileV128And for amd64. -func (c *amd64Compiler) compileV128And(wazeroir.OperationV128And) error { +func (c *amd64Compiler) compileV128And(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -672,7 +696,7 @@ func (c *amd64Compiler) compileV128And(wazeroir.OperationV128And) error { } // compileV128Not implements compiler.compileV128Not for amd64. 
-func (c *amd64Compiler) compileV128Not(wazeroir.OperationV128Not) error { +func (c *amd64Compiler) compileV128Not(*wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -692,7 +716,7 @@ func (c *amd64Compiler) compileV128Not(wazeroir.OperationV128Not) error { } // compileV128Or implements compiler.compileV128Or for amd64. -func (c *amd64Compiler) compileV128Or(wazeroir.OperationV128Or) error { +func (c *amd64Compiler) compileV128Or(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -711,7 +735,7 @@ func (c *amd64Compiler) compileV128Or(wazeroir.OperationV128Or) error { } // compileV128Xor implements compiler.compileV128Xor for amd64. -func (c *amd64Compiler) compileV128Xor(wazeroir.OperationV128Xor) error { +func (c *amd64Compiler) compileV128Xor(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -730,7 +754,7 @@ func (c *amd64Compiler) compileV128Xor(wazeroir.OperationV128Xor) error { } // compileV128Bitselect implements compiler.compileV128Bitselect for amd64. -func (c *amd64Compiler) compileV128Bitselect(wazeroir.OperationV128Bitselect) error { +func (c *amd64Compiler) compileV128Bitselect(*wazeroir.UnionOperation) error { selector := c.locationStack.popV128() if err := c.compileEnsureOnRegister(selector); err != nil { return err @@ -758,7 +782,7 @@ func (c *amd64Compiler) compileV128Bitselect(wazeroir.OperationV128Bitselect) er } // compileV128AndNot implements compiler.compileV128AndNot for amd64. 
-func (c *amd64Compiler) compileV128AndNot(wazeroir.OperationV128AndNot) error { +func (c *amd64Compiler) compileV128AndNot(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -777,11 +801,13 @@ func (c *amd64Compiler) compileV128AndNot(wazeroir.OperationV128AndNot) error { } // compileV128Shr implements compiler.compileV128Shr for amd64. -func (c *amd64Compiler) compileV128Shr(o wazeroir.OperationV128Shr) error { +func (c *amd64Compiler) compileV128Shr(o *wazeroir.UnionOperation) error { // https://stackoverflow.com/questions/35002937/sse-simd-shift-with-one-byte-element-size-granularity - if o.Shape == wazeroir.ShapeI8x16 { - return c.compileV128ShrI8x16Impl(o.Signed) - } else if o.Shape == wazeroir.ShapeI64x2 && o.Signed { + shape := o.B1 + signed := o.B3 + if shape == wazeroir.ShapeI8x16 { + return c.compileV128ShrI8x16Impl(signed) + } else if shape == wazeroir.ShapeI64x2 && signed { return c.compileV128ShrI64x2SignedImpl() } else { return c.compileV128ShrImpl(o) @@ -789,7 +815,7 @@ func (c *amd64Compiler) compileV128Shr(o wazeroir.OperationV128Shr) error { } // compileV128ShrImpl implements shift right instructions except for i8x16 (logical/arithmetic) and i64x2 (arithmetic). -func (c *amd64Compiler) compileV128ShrImpl(o wazeroir.OperationV128Shr) error { +func (c *amd64Compiler) compileV128ShrImpl(o *wazeroir.UnionOperation) error { s := c.locationStack.pop() if err := c.compileEnsureOnRegister(s); err != nil { return err @@ -807,17 +833,19 @@ func (c *amd64Compiler) compileV128ShrImpl(o wazeroir.OperationV128Shr) error { var moduleConst int64 var shift asm.Instruction - switch o.Shape { + shape := o.B1 + signed := o.B3 + switch shape { case wazeroir.ShapeI16x8: moduleConst = 0xf // modulo 16. - if o.Signed { + if signed { shift = amd64.PSRAW } else { shift = amd64.PSRLW } case wazeroir.ShapeI32x4: moduleConst = 0x1f // modulo 32. 
- if o.Signed { + if signed { shift = amd64.PSRAD } else { shift = amd64.PSRLD @@ -1006,7 +1034,7 @@ var i8x16SHLMaskTable = [8 * 16]byte{ // (the number of possible shift amount 0, } // compileV128Shl implements compiler.compileV128Shl for amd64. -func (c *amd64Compiler) compileV128Shl(o wazeroir.OperationV128Shl) error { +func (c *amd64Compiler) compileV128Shl(o *wazeroir.UnionOperation) error { s := c.locationStack.pop() if err := c.compileEnsureOnRegister(s); err != nil { return err @@ -1024,7 +1052,8 @@ func (c *amd64Compiler) compileV128Shl(o wazeroir.OperationV128Shl) error { var modulo int64 var shift asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: modulo = 0x7 // modulo 8. // x86 doesn't have packed bytes shift, so we use PSLLW and mask-out the redundant bits. @@ -1046,7 +1075,7 @@ func (c *amd64Compiler) compileV128Shl(o wazeroir.OperationV128Shl) error { c.assembler.CompileRegisterToRegister(amd64.MOVL, gpShiftAmount, vecTmp) c.assembler.CompileRegisterToRegister(shift, vecTmp, x1.register) - if o.Shape == wazeroir.ShapeI8x16 { + if shape == wazeroir.ShapeI8x16 { gpTmp, err := c.allocateRegister(registerTypeGeneralPurpose) if err != nil { return err @@ -1078,7 +1107,7 @@ func (c *amd64Compiler) compileV128Shl(o wazeroir.OperationV128Shl) error { } // compileV128Cmp implements compiler.compileV128Cmp for amd64. 
-func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { +func (c *amd64Compiler) compileV128Cmp(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1098,7 +1127,8 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { ) x1Reg, x2Reg, result := x1.register, x2.register, asm.NilRegister - switch o.Type { + v128CmpType := o.B1 + switch v128CmpType { case wazeroir.V128CmpTypeF32x4Eq: c.assembler.CompileRegisterToRegisterWithArg(amd64.CMPPS, x2Reg, x1Reg, floatEqualArg) result = x1Reg @@ -1154,7 +1184,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { result = x2Reg case wazeroir.V128CmpTypeI8x16LtU, wazeroir.V128CmpTypeI8x16GtU: // Take the unsigned min/max values on each byte on x1 and x2 onto x1Reg. - if o.Type == wazeroir.V128CmpTypeI8x16LtU { + if v128CmpType == wazeroir.V128CmpTypeI8x16LtU { c.assembler.CompileRegisterToRegister(amd64.PMINUB, x2Reg, x1Reg) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUB, x2Reg, x1Reg) @@ -1175,7 +1205,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } // Copy the value on the src to tmp. 
c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI8x16LeS { + if v128CmpType == wazeroir.V128CmpTypeI8x16LeS { c.assembler.CompileRegisterToRegister(amd64.PMINSB, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMINUB, x2Reg, tmp) @@ -1188,7 +1218,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { return err } c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI8x16GeS { + if v128CmpType == wazeroir.V128CmpTypeI8x16GeS { c.assembler.CompileRegisterToRegister(amd64.PMAXSB, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUB, x2Reg, tmp) @@ -1210,7 +1240,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { result = x2Reg case wazeroir.V128CmpTypeI16x8LtU, wazeroir.V128CmpTypeI16x8GtU: // Take the unsigned min/max values on each byte on x1 and x2 onto x1Reg. - if o.Type == wazeroir.V128CmpTypeI16x8LtU { + if v128CmpType == wazeroir.V128CmpTypeI16x8LtU { c.assembler.CompileRegisterToRegister(amd64.PMINUW, x2Reg, x1Reg) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUW, x2Reg, x1Reg) @@ -1231,7 +1261,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } // Copy the value on the src to tmp. 
c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI16x8LeS { + if v128CmpType == wazeroir.V128CmpTypeI16x8LeS { c.assembler.CompileRegisterToRegister(amd64.PMINSW, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMINUW, x2Reg, tmp) @@ -1244,7 +1274,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { return err } c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI16x8GeS { + if v128CmpType == wazeroir.V128CmpTypeI16x8GeS { c.assembler.CompileRegisterToRegister(amd64.PMAXSW, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUW, x2Reg, tmp) @@ -1266,7 +1296,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { result = x2Reg case wazeroir.V128CmpTypeI32x4LtU, wazeroir.V128CmpTypeI32x4GtU: // Take the unsigned min/max values on each byte on x1 and x2 onto x1Reg. - if o.Type == wazeroir.V128CmpTypeI32x4LtU { + if v128CmpType == wazeroir.V128CmpTypeI32x4LtU { c.assembler.CompileRegisterToRegister(amd64.PMINUD, x2Reg, x1Reg) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUD, x2Reg, x1Reg) @@ -1287,7 +1317,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } // Copy the value on the src to tmp. 
c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI32x4LeS { + if v128CmpType == wazeroir.V128CmpTypeI32x4LeS { c.assembler.CompileRegisterToRegister(amd64.PMINSD, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMINUD, x2Reg, tmp) @@ -1300,7 +1330,7 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { return err } c.assembler.CompileRegisterToRegister(amd64.MOVDQA, x1Reg, tmp) - if o.Type == wazeroir.V128CmpTypeI32x4GeS { + if v128CmpType == wazeroir.V128CmpTypeI32x4GeS { c.assembler.CompileRegisterToRegister(amd64.PMAXSD, x2Reg, tmp) } else { c.assembler.CompileRegisterToRegister(amd64.PMAXUD, x2Reg, tmp) @@ -1345,17 +1375,19 @@ func (c *amd64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } // compileV128AddSat implements compiler.compileV128AddSat for amd64. -func (c *amd64Compiler) compileV128AddSat(o wazeroir.OperationV128AddSat) error { +func (c *amd64Compiler) compileV128AddSat(o *wazeroir.UnionOperation) error { var inst asm.Instruction - switch o.Shape { + shape := o.B1 + signed := o.B3 + switch shape { case wazeroir.ShapeI8x16: - if o.Signed { + if signed { inst = amd64.PADDSB } else { inst = amd64.PADDUSB } case wazeroir.ShapeI16x8: - if o.Signed { + if signed { inst = amd64.PADDSW } else { inst = amd64.PADDUSW @@ -1380,17 +1412,19 @@ func (c *amd64Compiler) compileV128AddSat(o wazeroir.OperationV128AddSat) error } // compileV128SubSat implements compiler.compileV128SubSat for amd64. 
-func (c *amd64Compiler) compileV128SubSat(o wazeroir.OperationV128SubSat) error { +func (c *amd64Compiler) compileV128SubSat(o *wazeroir.UnionOperation) error { var inst asm.Instruction - switch o.Shape { + shape := o.B1 + signed := o.B3 + switch shape { case wazeroir.ShapeI8x16: - if o.Signed { + if signed { inst = amd64.PSUBSB } else { inst = amd64.PSUBUSB } case wazeroir.ShapeI16x8: - if o.Signed { + if signed { inst = amd64.PSUBSW } else { inst = amd64.PSUBUSW @@ -1415,9 +1449,10 @@ func (c *amd64Compiler) compileV128SubSat(o wazeroir.OperationV128SubSat) error } // compileV128Mul implements compiler.compileV128Mul for amd64. -func (c *amd64Compiler) compileV128Mul(o wazeroir.OperationV128Mul) error { +func (c *amd64Compiler) compileV128Mul(o *wazeroir.UnionOperation) error { var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI16x8: inst = amd64.PMULLW case wazeroir.ShapeI32x4: @@ -1512,7 +1547,7 @@ func (c *amd64Compiler) compileV128MulI64x2() error { } // compileV128Div implements compiler.compileV128Div for amd64. -func (c *amd64Compiler) compileV128Div(o wazeroir.OperationV128Div) error { +func (c *amd64Compiler) compileV128Div(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1524,7 +1559,8 @@ func (c *amd64Compiler) compileV128Div(o wazeroir.OperationV128Div) error { } var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: inst = amd64.DIVPS case wazeroir.ShapeF64x2: @@ -1539,11 +1575,12 @@ func (c *amd64Compiler) compileV128Div(o wazeroir.OperationV128Div) error { } // compileV128Neg implements compiler.compileV128Neg for amd64. 
-func (c *amd64Compiler) compileV128Neg(o wazeroir.OperationV128Neg) error { - if o.Shape <= wazeroir.ShapeI64x2 { - return c.compileV128NegInt(o.Shape) +func (c *amd64Compiler) compileV128Neg(o *wazeroir.UnionOperation) error { + shape := o.B1 + if shape <= wazeroir.ShapeI64x2 { + return c.compileV128NegInt(shape) } else { - return c.compileV128NegFloat(o.Shape) + return c.compileV128NegFloat(shape) } } @@ -1617,14 +1654,15 @@ func (c *amd64Compiler) compileV128NegFloat(s wazeroir.Shape) error { } // compileV128Sqrt implements compiler.compileV128Sqrt for amd64. -func (c *amd64Compiler) compileV128Sqrt(o wazeroir.OperationV128Sqrt) error { +func (c *amd64Compiler) compileV128Sqrt(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err } var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF64x2: inst = amd64.SQRTPD case wazeroir.ShapeF32x4: @@ -1637,8 +1675,9 @@ func (c *amd64Compiler) compileV128Sqrt(o wazeroir.OperationV128Sqrt) error { } // compileV128Abs implements compiler.compileV128Abs for amd64. -func (c *amd64Compiler) compileV128Abs(o wazeroir.OperationV128Abs) error { - if o.Shape == wazeroir.ShapeI64x2 { +func (c *amd64Compiler) compileV128Abs(o *wazeroir.UnionOperation) error { + shape := o.B1 + if shape == wazeroir.ShapeI64x2 { return c.compileV128AbsI64x2() } @@ -1648,7 +1687,7 @@ func (c *amd64Compiler) compileV128Abs(o wazeroir.OperationV128Abs) error { } result := v.register - switch o.Shape { + switch shape { case wazeroir.ShapeI8x16: c.assembler.CompileRegisterToRegister(amd64.PABSB, result, result) case wazeroir.ShapeI16x8: @@ -1736,7 +1775,7 @@ var ( ) // compileV128Popcnt implements compiler.compileV128Popcnt for amd64. 
-func (c *amd64Compiler) compileV128Popcnt(wazeroir.OperationV128Popcnt) error { +func (c *amd64Compiler) compileV128Popcnt(operation *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -1811,7 +1850,7 @@ func (c *amd64Compiler) compileV128Popcnt(wazeroir.OperationV128Popcnt) error { } // compileV128Min implements compiler.compileV128Min for amd64. -func (c *amd64Compiler) compileV128Min(o wazeroir.OperationV128Min) error { +func (c *amd64Compiler) compileV128Min(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1822,26 +1861,28 @@ func (c *amd64Compiler) compileV128Min(o wazeroir.OperationV128Min) error { return err } - if o.Shape >= wazeroir.ShapeF32x4 { - return c.compileV128FloatMinImpl(o.Shape == wazeroir.ShapeF32x4, x1.register, x2.register) + shape := o.B1 + if shape >= wazeroir.ShapeF32x4 { + return c.compileV128FloatMinImpl(shape == wazeroir.ShapeF32x4, x1.register, x2.register) } + signed := o.B3 var inst asm.Instruction - switch o.Shape { + switch shape { case wazeroir.ShapeI8x16: - if o.Signed { + if signed { inst = amd64.PMINSB } else { inst = amd64.PMINUB } case wazeroir.ShapeI16x8: - if o.Signed { + if signed { inst = amd64.PMINSW } else { inst = amd64.PMINUW } case wazeroir.ShapeI32x4: - if o.Signed { + if signed { inst = amd64.PMINSD } else { inst = amd64.PMINUD @@ -1907,7 +1948,7 @@ func (c *amd64Compiler) compileV128FloatMinImpl(is32bit bool, x1r, x2r asm.Regis } // compileV128Max implements compiler.compileV128Max for amd64. 
-func (c *amd64Compiler) compileV128Max(o wazeroir.OperationV128Max) error { +func (c *amd64Compiler) compileV128Max(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1918,26 +1959,28 @@ func (c *amd64Compiler) compileV128Max(o wazeroir.OperationV128Max) error { return err } - if o.Shape >= wazeroir.ShapeF32x4 { - return c.compileV128FloatMaxImpl(o.Shape == wazeroir.ShapeF32x4, x1.register, x2.register) + shape := o.B1 + if shape >= wazeroir.ShapeF32x4 { + return c.compileV128FloatMaxImpl(shape == wazeroir.ShapeF32x4, x1.register, x2.register) } + signed := o.B3 var inst asm.Instruction - switch o.Shape { + switch shape { case wazeroir.ShapeI8x16: - if o.Signed { + if signed { inst = amd64.PMAXSB } else { inst = amd64.PMAXUB } case wazeroir.ShapeI16x8: - if o.Signed { + if signed { inst = amd64.PMAXSW } else { inst = amd64.PMAXUW } case wazeroir.ShapeI32x4: - if o.Signed { + if signed { inst = amd64.PMAXSD } else { inst = amd64.PMAXUD @@ -2011,7 +2054,7 @@ func (c *amd64Compiler) compileV128FloatMaxImpl(is32bit bool, x1r, x2r asm.Regis } // compileV128AvgrU implements compiler.compileV128AvgrU for amd64. -func (c *amd64Compiler) compileV128AvgrU(o wazeroir.OperationV128AvgrU) error { +func (c *amd64Compiler) compileV128AvgrU(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2023,7 +2066,8 @@ func (c *amd64Compiler) compileV128AvgrU(o wazeroir.OperationV128AvgrU) error { } var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: inst = amd64.PAVGB case wazeroir.ShapeI16x8: @@ -2038,7 +2082,7 @@ func (c *amd64Compiler) compileV128AvgrU(o wazeroir.OperationV128AvgrU) error { } // compileV128Pmin implements compiler.compileV128Pmin for amd64. 
-func (c *amd64Compiler) compileV128Pmin(o wazeroir.OperationV128Pmin) error { +func (c *amd64Compiler) compileV128Pmin(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2050,7 +2094,7 @@ func (c *amd64Compiler) compileV128Pmin(o wazeroir.OperationV128Pmin) error { } var min asm.Instruction - if o.Shape == wazeroir.ShapeF32x4 { + if o.B1 == wazeroir.ShapeF32x4 { min = amd64.MINPS } else { min = amd64.MINPD @@ -2066,7 +2110,7 @@ func (c *amd64Compiler) compileV128Pmin(o wazeroir.OperationV128Pmin) error { } // compileV128Pmax implements compiler.compileV128Pmax for amd64. -func (c *amd64Compiler) compileV128Pmax(o wazeroir.OperationV128Pmax) error { +func (c *amd64Compiler) compileV128Pmax(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2078,7 +2122,7 @@ func (c *amd64Compiler) compileV128Pmax(o wazeroir.OperationV128Pmax) error { } var min asm.Instruction - if o.Shape == wazeroir.ShapeF32x4 { + if o.B1 == wazeroir.ShapeF32x4 { min = amd64.MAXPS } else { min = amd64.MAXPD @@ -2094,31 +2138,31 @@ func (c *amd64Compiler) compileV128Pmax(o wazeroir.OperationV128Pmax) error { } // compileV128Ceil implements compiler.compileV128Ceil for amd64. -func (c *amd64Compiler) compileV128Ceil(o wazeroir.OperationV128Ceil) error { +func (c *amd64Compiler) compileV128Ceil(o *wazeroir.UnionOperation) error { // See https://www.felixcloutier.com/x86/roundpd const roundModeCeil = 0x2 - return c.compileV128RoundImpl(o.Shape == wazeroir.ShapeF32x4, roundModeCeil) + return c.compileV128RoundImpl(o.B1 == wazeroir.ShapeF32x4, roundModeCeil) } // compileV128Floor implements compiler.compileV128Floor for amd64. 
-func (c *amd64Compiler) compileV128Floor(o wazeroir.OperationV128Floor) error { +func (c *amd64Compiler) compileV128Floor(o *wazeroir.UnionOperation) error { // See https://www.felixcloutier.com/x86/roundpd const roundModeFloor = 0x1 - return c.compileV128RoundImpl(o.Shape == wazeroir.ShapeF32x4, roundModeFloor) + return c.compileV128RoundImpl(o.B1 == wazeroir.ShapeF32x4, roundModeFloor) } // compileV128Trunc implements compiler.compileV128Trunc for amd64. -func (c *amd64Compiler) compileV128Trunc(o wazeroir.OperationV128Trunc) error { +func (c *amd64Compiler) compileV128Trunc(o *wazeroir.UnionOperation) error { // See https://www.felixcloutier.com/x86/roundpd const roundModeTrunc = 0x3 - return c.compileV128RoundImpl(o.Shape == wazeroir.ShapeF32x4, roundModeTrunc) + return c.compileV128RoundImpl(o.B1 == wazeroir.ShapeF32x4, roundModeTrunc) } // compileV128Nearest implements compiler.compileV128Nearest for amd64. -func (c *amd64Compiler) compileV128Nearest(o wazeroir.OperationV128Nearest) error { +func (c *amd64Compiler) compileV128Nearest(o *wazeroir.UnionOperation) error { // See https://www.felixcloutier.com/x86/roundpd const roundModeNearest = 0x0 - return c.compileV128RoundImpl(o.Shape == wazeroir.ShapeF32x4, roundModeNearest) + return c.compileV128RoundImpl(o.B1 == wazeroir.ShapeF32x4, roundModeNearest) } // compileV128RoundImpl implements compileV128Nearest compileV128Trunc compileV128Floor and compileV128Ceil @@ -2143,14 +2187,17 @@ func (c *amd64Compiler) compileV128RoundImpl(is32bit bool, mode byte) error { } // compileV128Extend implements compiler.compileV128Extend for amd64. 
-func (c *amd64Compiler) compileV128Extend(o wazeroir.OperationV128Extend) error { +func (c *amd64Compiler) compileV128Extend(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err } vr := v.register - if !o.UseLow { + originShape := o.B1 + signed := o.B2 == 1 + useLow := o.B3 + if !useLow { // We have to shift the higher 64-bits into the lower ones before the actual extending instruction. // Shifting right by 0x8 * 8 = 64bits and concatenate itself. // See https://www.felixcloutier.com/x86/palignr @@ -2158,21 +2205,21 @@ func (c *amd64Compiler) compileV128Extend(o wazeroir.OperationV128Extend) error } var extend asm.Instruction - switch o.OriginShape { + switch originShape { case wazeroir.ShapeI8x16: - if o.Signed { + if signed { extend = amd64.PMOVSXBW } else { extend = amd64.PMOVZXBW } case wazeroir.ShapeI16x8: - if o.Signed { + if signed { extend = amd64.PMOVSXWD } else { extend = amd64.PMOVZXWD } case wazeroir.ShapeI32x4: - if o.Signed { + if signed { extend = amd64.PMOVSXDQ } else { extend = amd64.PMOVZXDQ @@ -2185,7 +2232,7 @@ func (c *amd64Compiler) compileV128Extend(o wazeroir.OperationV128Extend) error } // compileV128ExtMul implements compiler.compileV128ExtMul for amd64. -func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error { +func (c *amd64Compiler) compileV128ExtMul(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2198,9 +2245,12 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error x1r, x2r := x1.register, x2.register - switch o.OriginShape { + originShape := o.B1 + signed := o.B2 == 1 + useLow := o.B3 + switch originShape { case wazeroir.ShapeI8x16: - if !o.UseLow { + if !useLow { // We have to shift the higher 64-bits into the lower ones before the actual extending instruction. 
// Shifting right by 0x8 * 8 = 64bits and concatenate itself. // See https://www.felixcloutier.com/x86/palignr @@ -2209,7 +2259,7 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error } var ext asm.Instruction - if o.Signed { + if signed { ext = amd64.PMOVSXBW } else { ext = amd64.PMOVZXBW @@ -2231,7 +2281,7 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error // Multiply the values and store the lower 16-bits into x1r. c.assembler.CompileRegisterToRegister(amd64.PMULLW, x2r, x1r) - if o.Signed { + if signed { // Signed multiply the values and store the higher 16-bits into tmp. c.assembler.CompileRegisterToRegister(amd64.PMULHW, x2r, tmp) } else { @@ -2240,7 +2290,7 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error } // Unpack lower or higher half of vectors (tmp and x1r) and concatenate them. - if o.UseLow { + if useLow { c.assembler.CompileRegisterToRegister(amd64.PUNPCKLWD, tmp, x1r) } else { c.assembler.CompileRegisterToRegister(amd64.PUNPCKHWD, tmp, x1r) @@ -2248,7 +2298,7 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error case wazeroir.ShapeI32x4: var shuffleOrder byte // Given that the original state of the register is as [v1, v2, v3, v4] where vN = a word, - if o.UseLow { + if useLow { // This makes the register as [v1, v1, v2, v2] shuffleOrder = 0b01010000 } else { @@ -2260,7 +2310,7 @@ func (c *amd64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error c.assembler.CompileRegisterToRegisterWithArg(amd64.PSHUFD, x2r, x2r, shuffleOrder) var mul asm.Instruction - if o.Signed { + if signed { mul = amd64.PMULDQ } else { mul = amd64.PMULUDQ @@ -2279,7 +2329,7 @@ var q15mulrSatSMask = [16]byte{ } // compileV128Q15mulrSatS implements compiler.compileV128Q15mulrSatS for amd64. 
-func (c *amd64Compiler) compileV128Q15mulrSatS(wazeroir.OperationV128Q15mulrSatS) error { +func (c *amd64Compiler) compileV128Q15mulrSatS(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2322,14 +2372,16 @@ var ( ) // compileV128ExtAddPairwise implements compiler.compileV128ExtAddPairwise for amd64. -func (c *amd64Compiler) compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAddPairwise) error { +func (c *amd64Compiler) compileV128ExtAddPairwise(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err } vr := v.register - switch o.OriginShape { + originShape := o.B1 + signed := o.B3 + switch originShape { case wazeroir.ShapeI8x16: allOnesReg, err := c.allocateRegister(registerTypeVector) if err != nil { @@ -2343,7 +2395,7 @@ func (c *amd64Compiler) compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAdd var result asm.Register // See https://www.felixcloutier.com/x86/pmaddubsw for detail. - if o.Signed { + if signed { // Interpret vr's value as signed byte and multiply with one and add pairwise, which results in pairwise // signed extadd. c.assembler.CompileRegisterToRegister(amd64.PMADDUBSW, vr, allOnesReg) @@ -2364,7 +2416,7 @@ func (c *amd64Compiler) compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAdd return err } - if o.Signed { + if signed { // See https://www.felixcloutier.com/x86/pmaddwd if err = c.assembler.CompileStaticConstToRegister(amd64.MOVDQU, asm.NewStaticConst(allOnesI16x8[:]), tmp); err != nil { @@ -2410,7 +2462,7 @@ func (c *amd64Compiler) compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAdd } // compileV128FloatPromote implements compiler.compileV128FloatPromote for amd64. 
-func (c *amd64Compiler) compileV128FloatPromote(wazeroir.OperationV128FloatPromote) error { +func (c *amd64Compiler) compileV128FloatPromote(*wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -2423,7 +2475,7 @@ func (c *amd64Compiler) compileV128FloatPromote(wazeroir.OperationV128FloatPromo } // compileV128FloatDemote implements compiler.compileV128FloatDemote for amd64. -func (c *amd64Compiler) compileV128FloatDemote(wazeroir.OperationV128FloatDemote) error { +func (c *amd64Compiler) compileV128FloatDemote(*wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -2436,7 +2488,7 @@ func (c *amd64Compiler) compileV128FloatDemote(wazeroir.OperationV128FloatDemote } // compileV128Dot implements compiler.compileV128Dot for amd64. -func (c *amd64Compiler) compileV128Dot(wazeroir.OperationV128Dot) error { +func (c *amd64Compiler) compileV128Dot(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2459,16 +2511,19 @@ var fConvertFromIMask = [16]byte{ } // compileV128FConvertFromI implements compiler.compileV128FConvertFromI for amd64. 
-func (c *amd64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConvertFromI) error { +func (c *amd64Compiler) compileV128FConvertFromI(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err } vr := v.register - switch o.DestinationShape { + destinationShape := o.B1 + signed := o.B3 + + switch destinationShape { case wazeroir.ShapeF32x4: - if o.Signed { + if signed { c.assembler.CompileRegisterToRegister(amd64.CVTDQ2PS, vr, vr) } else { tmp, err := c.allocateRegister(registerTypeVector) @@ -2500,7 +2555,7 @@ func (c *amd64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConver c.assembler.CompileRegisterToRegister(amd64.ADDPS, tmp, vr) } case wazeroir.ShapeF64x2: - if o.Signed { + if signed { c.assembler.CompileRegisterToRegister(amd64.CVTDQ2PD, vr, vr) } else { tmp, err := c.allocateRegister(registerTypeVector) @@ -2538,7 +2593,7 @@ func (c *amd64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConver } // compileV128Narrow implements compiler.compileV128Narrow for amd64. -func (c *amd64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error { +func (c *amd64Compiler) compileV128Narrow(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -2550,15 +2605,17 @@ func (c *amd64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error } var narrow asm.Instruction - switch o.OriginShape { + originShape := o.B1 + signed := o.B3 + switch originShape { case wazeroir.ShapeI16x8: - if o.Signed { + if signed { narrow = amd64.PACKSSWB } else { narrow = amd64.PACKUSWB } case wazeroir.ShapeI32x4: - if o.Signed { + if signed { narrow = amd64.PACKSSDW } else { narrow = amd64.PACKUSDW @@ -2599,7 +2656,7 @@ var ( ) // compileV128ITruncSatFromF implements compiler.compileV128ITruncSatFromF for amd64. 
-func (c *amd64Compiler) compileV128ITruncSatFromF(o wazeroir.OperationV128ITruncSatFromF) error { +func (c *amd64Compiler) compileV128ITruncSatFromF(o *wazeroir.UnionOperation) error { v := c.locationStack.popV128() if err := c.compileEnsureOnRegister(v); err != nil { return err @@ -2613,9 +2670,11 @@ func (c *amd64Compiler) compileV128ITruncSatFromF(o wazeroir.OperationV128ITrunc c.locationStack.markRegisterUsed(tmp) - switch o.OriginShape { + originShape := o.B1 + signed := o.B3 + switch originShape { case wazeroir.ShapeF32x4: - if o.Signed { + if signed { // Copy the value into tmp. c.assembler.CompileRegisterToRegister(amd64.MOVDQA, vr, tmp) @@ -2682,7 +2741,7 @@ func (c *amd64Compiler) compileV128ITruncSatFromF(o wazeroir.OperationV128ITrunc return err } - if o.Signed { + if signed { // Copy the value into tmp. c.assembler.CompileRegisterToRegister(amd64.MOVDQA, vr, tmp) diff --git a/internal/engine/compiler/impl_vec_amd64_test.go b/internal/engine/compiler/impl_vec_amd64_test.go index c5c3d841..f603c3cb 100644 --- a/internal/engine/compiler/impl_vec_amd64_test.go +++ b/internal/engine/compiler/impl_vec_amd64_test.go @@ -22,7 +22,7 @@ func TestAmd64Compiler_V128Shuffle_ConstTable_MiddleOfFunction(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - lanes := [16]byte{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0} + lanes := []uint64{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0} v := [16]byte{0: 0xa, 1: 0xb, 10: 0xc} w := [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} exp := [16]byte{ @@ -32,19 +32,13 @@ func TestAmd64Compiler_V128Shuffle_ConstTable_MiddleOfFunction(t *testing.T) { 0xa, 0xa, 0xa, 0xa, } - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(v[:8]), - Hi: binary.LittleEndian.Uint64(v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(v[:8]), 
binary.LittleEndian.Uint64(v[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(w[:8]), - Hi: binary.LittleEndian.Uint64(w[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(w[:8]), binary.LittleEndian.Uint64(w[8:])))) require.NoError(t, err) - err = compiler.compileV128Shuffle(wazeroir.OperationV128Shuffle{Lanes: lanes}) + err = compiler.compileV128Shuffle(operationPtr(wazeroir.NewOperationV128Shuffle(lanes))) require.NoError(t, err) assembler := compiler.(*amd64Compiler).assembler.(*amd64.AssemblerImpl) @@ -187,11 +181,11 @@ func TestAmd64Compiler_compileV128ShrI64x2SignedImpl(t *testing.T) { c.locationStack.markRegisterUnused(loc.register) // Instead, push the conditional flag value which is supposed be interpreted as 1 (=shiftAmount). - err := c.compileConstI32(wazeroir.NewOperationConstI32(0)) + err := c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) - err = c.compileConstI32(wazeroir.NewOperationConstI32(0)) + err = c.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(0))) require.NoError(t, err) - err = c.compileEq(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32)) + err = c.compileEq(operationPtr(wazeroir.NewOperationEq(wazeroir.UnsignedTypeI32))) require.NoError(t, err) }, verifyFn: func(t *testing.T, env *compilerEnv) {}, @@ -208,13 +202,10 @@ func TestAmd64Compiler_compileV128ShrI64x2SignedImpl(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(x[:8]), - Hi: binary.LittleEndian.Uint64(x[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(x[:8]), binary.LittleEndian.Uint64(x[8:])))) require.NoError(t, err) - err = compiler.compileConstI32(wazeroir.NewOperationConstI32(shiftAmount)) + err 
= compiler.compileConstI32(operationPtr(wazeroir.NewOperationConstI32(shiftAmount))) require.NoError(t, err) amdCompiler := compiler.(*amd64Compiler) @@ -288,18 +279,12 @@ func TestAmd64Compiler_compileV128Neg_NaNOnTemporary(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(tc.v[:8]), - Hi: binary.LittleEndian.Uint64(tc.v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(tc.v[:8]), binary.LittleEndian.Uint64(tc.v[8:])))) require.NoError(t, err) // Ensures that the previous state of temporary register used by Neg holds // NaN values. - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: math.Float64bits(math.NaN()), - Hi: math.Float64bits(math.NaN()), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(math.Float64bits(math.NaN()), math.Float64bits(math.NaN())))) require.NoError(t, err) // Mark that the temp register is available for Neg instruction below. @@ -307,7 +292,7 @@ func TestAmd64Compiler_compileV128Neg_NaNOnTemporary(t *testing.T) { compiler.runtimeValueLocationStack().markRegisterUnused(loc.register) // Now compiling Neg where it uses temporary register holding NaN values at this point. - err = compiler.compileV128Neg(wazeroir.OperationV128Neg{Shape: tc.shape}) + err = compiler.compileV128Neg(operationPtr(wazeroir.NewOperationV128Neg(tc.shape))) require.NoError(t, err) err = compiler.compileReturnFunction() diff --git a/internal/engine/compiler/impl_vec_arm64.go b/internal/engine/compiler/impl_vec_arm64.go index 2e68d9f0..70aa4a01 100644 --- a/internal/engine/compiler/impl_vec_arm64.go +++ b/internal/engine/compiler/impl_vec_arm64.go @@ -7,11 +7,13 @@ import ( ) // compileV128Const implements compiler.compileV128Const for arm64. 
-func (c *arm64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { +func (c *arm64Compiler) compileV128Const(o *wazeroir.UnionOperation) error { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } + lo, hi := o.U1, o.U2 + result, err := c.allocateRegister(registerTypeVector) if err != nil { return err @@ -19,19 +21,19 @@ func (c *arm64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { // Moves the lower 64-bits as a scalar float. intReg := arm64ReservedRegisterForTemporary - if o.Lo == 0 { + if lo == 0 { intReg = arm64.RegRZR } else { - c.assembler.CompileConstToRegister(arm64.MOVD, int64(o.Lo), arm64ReservedRegisterForTemporary) + c.assembler.CompileConstToRegister(arm64.MOVD, int64(lo), arm64ReservedRegisterForTemporary) } c.assembler.CompileRegisterToRegister(arm64.FMOVD, intReg, result) // Then, insert the higher bits with INS(vector,general). intReg = arm64ReservedRegisterForTemporary - if o.Hi == 0 { + if hi == 0 { intReg = arm64.RegRZR } else { - c.assembler.CompileConstToRegister(arm64.MOVD, int64(o.Hi), arm64ReservedRegisterForTemporary) + c.assembler.CompileConstToRegister(arm64.MOVD, int64(hi), arm64ReservedRegisterForTemporary) } // "ins Vn.D[1], intReg" c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, intReg, result, arm64.VectorArrangementD, 1) @@ -41,7 +43,7 @@ func (c *arm64Compiler) compileV128Const(o wazeroir.OperationV128Const) error { } // compileV128Add implements compiler.compileV128Add for arm64. 
-func (c *arm64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { +func (c *arm64Compiler) compileV128Add(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -56,7 +58,8 @@ func (c *arm64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { var arr arm64.VectorArrangement var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: inst = arm64.VADD arr = arm64.VectorArrangement16B @@ -86,7 +89,7 @@ func (c *arm64Compiler) compileV128Add(o wazeroir.OperationV128Add) error { } // compileV128Sub implements compiler.compileV128Sub for arm64. -func (c *arm64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) (err error) { +func (c *arm64Compiler) compileV128Sub(o *wazeroir.UnionOperation) (err error) { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -101,7 +104,8 @@ func (c *arm64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) (err error) var arr arm64.VectorArrangement var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: inst = arm64.VSUB arr = arm64.VectorArrangement16B @@ -131,7 +135,7 @@ func (c *arm64Compiler) compileV128Sub(o wazeroir.OperationV128Sub) (err error) } // compileV128Load implements compiler.compileV128Load for arm64. 
-func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error) { +func (c *arm64Compiler) compileV128Load(o *wazeroir.UnionOperation) (err error) { if err := c.maybeCompileMoveTopConditionalToGeneralPurposeRegister(); err != nil { return err } @@ -140,9 +144,12 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error return err } - switch o.Type { + offset := uint32(o.U2) + loadType := wazeroir.V128LoadType(o.B1) + + switch loadType { case wazeroir.V128LoadType128: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 16) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 16) if err != nil { return err } @@ -150,7 +157,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error arm64ReservedRegisterForMemory, offset, result, arm64.VectorArrangementQ, ) case wazeroir.V128LoadType8x8s: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -160,7 +167,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.SSHLL, result, result, arm64.VectorArrangement8B, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType8x8u: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -170,7 +177,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.USHLL, result, result, arm64.VectorArrangement8B, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType16x4s: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -180,7 +187,7 @@ func (c *arm64Compiler) compileV128Load(o 
wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.SSHLL, result, result, arm64.VectorArrangement4H, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType16x4u: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -190,7 +197,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.USHLL, result, result, arm64.VectorArrangement4H, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType32x2s: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -200,7 +207,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.SSHLL, result, result, arm64.VectorArrangement2S, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType32x2u: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -210,35 +217,35 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error c.assembler.CompileVectorRegisterToVectorRegister(arm64.USHLL, result, result, arm64.VectorArrangement2S, arm64.VectorIndexNone, arm64.VectorIndexNone) case wazeroir.V128LoadType8Splat: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 1) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 1) if err != nil { return err } c.assembler.CompileRegisterToRegister(arm64.ADD, arm64ReservedRegisterForMemory, offset) c.assembler.CompileMemoryToVectorRegister(arm64.LD1R, offset, 0, result, arm64.VectorArrangement16B) case wazeroir.V128LoadType16Splat: - offset, err := 
c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 2) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 2) if err != nil { return err } c.assembler.CompileRegisterToRegister(arm64.ADD, arm64ReservedRegisterForMemory, offset) c.assembler.CompileMemoryToVectorRegister(arm64.LD1R, offset, 0, result, arm64.VectorArrangement8H) case wazeroir.V128LoadType32Splat: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 4) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 4) if err != nil { return err } c.assembler.CompileRegisterToRegister(arm64.ADD, arm64ReservedRegisterForMemory, offset) c.assembler.CompileMemoryToVectorRegister(arm64.LD1R, offset, 0, result, arm64.VectorArrangement4S) case wazeroir.V128LoadType64Splat: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } c.assembler.CompileRegisterToRegister(arm64.ADD, arm64ReservedRegisterForMemory, offset) c.assembler.CompileMemoryToVectorRegister(arm64.LD1R, offset, 0, result, arm64.VectorArrangement2D) case wazeroir.V128LoadType32zero: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 4) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 4) if err != nil { return err } @@ -246,7 +253,7 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error arm64ReservedRegisterForMemory, offset, result, arm64.VectorArrangementS, ) case wazeroir.V128LoadType64zero: - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, 8) + offset, err := c.compileMemoryAccessOffsetSetup(offset, 8) if err != nil { return err } @@ -260,21 +267,24 @@ func (c *arm64Compiler) compileV128Load(o wazeroir.OperationV128Load) (err error } // compileV128LoadLane implements compiler.compileV128LoadLane for arm64. 
-func (c *arm64Compiler) compileV128LoadLane(o wazeroir.OperationV128LoadLane) (err error) { +func (c *arm64Compiler) compileV128LoadLane(o *wazeroir.UnionOperation) (err error) { targetVector := c.locationStack.popV128() if err = c.compileEnsureOnRegister(targetVector); err != nil { return } - targetSizeInBytes := int64(o.LaneSize / 8) - source, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, targetSizeInBytes) + laneSize, laneIndex := o.B1, o.B2 + offset := uint32(o.U2) + + targetSizeInBytes := int64(laneSize / 8) + source, err := c.compileMemoryAccessOffsetSetup(offset, targetSizeInBytes) if err != nil { return err } var loadInst asm.Instruction var arr arm64.VectorArrangement - switch o.LaneSize { + switch laneSize { case 8: arr = arm64.VectorArrangementB loadInst = arm64.LDRB @@ -290,7 +300,7 @@ func (c *arm64Compiler) compileV128LoadLane(o wazeroir.OperationV128LoadLane) (e } c.assembler.CompileMemoryWithRegisterOffsetToRegister(loadInst, arm64ReservedRegisterForMemory, source, source) - c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, source, targetVector.register, arr, arm64.VectorIndex(o.LaneIndex)) + c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, source, targetVector.register, arr, arm64.VectorIndex(laneIndex)) c.pushVectorRuntimeValueLocationOnRegister(targetVector.register) c.locationStack.markRegisterUnused(source) @@ -298,30 +308,34 @@ func (c *arm64Compiler) compileV128LoadLane(o wazeroir.OperationV128LoadLane) (e } // compileV128Store implements compiler.compileV128Store for arm64. 
-func (c *arm64Compiler) compileV128Store(o wazeroir.OperationV128Store) (err error) { +func (c *arm64Compiler) compileV128Store(o *wazeroir.UnionOperation) (err error) { v := c.locationStack.popV128() if err = c.compileEnsureOnRegister(v); err != nil { return } const targetSizeInBytes = 16 - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, targetSizeInBytes) + offset := uint32(o.U2) + offsetReg, err := c.compileMemoryAccessOffsetSetup(offset, targetSizeInBytes) if err != nil { return err } c.assembler.CompileVectorRegisterToMemoryWithRegisterOffset(arm64.VMOV, - v.register, arm64ReservedRegisterForMemory, offset, arm64.VectorArrangementQ) + v.register, arm64ReservedRegisterForMemory, offsetReg, arm64.VectorArrangementQ) c.markRegisterUnused(v.register) return } // compileV128StoreLane implements compiler.compileV128StoreLane for arm64. -func (c *arm64Compiler) compileV128StoreLane(o wazeroir.OperationV128StoreLane) (err error) { +func (c *arm64Compiler) compileV128StoreLane(o *wazeroir.UnionOperation) (err error) { var arr arm64.VectorArrangement var storeInst asm.Instruction - switch o.LaneSize { + laneSize := o.B1 + laneIndex := o.B2 + offset := uint32(o.U2) + switch laneSize { case 8: storeInst = arm64.STRB arr = arm64.VectorArrangementB @@ -341,43 +355,46 @@ func (c *arm64Compiler) compileV128StoreLane(o wazeroir.OperationV128StoreLane) return } - targetSizeInBytes := int64(o.LaneSize / 8) - offset, err := c.compileMemoryAccessOffsetSetup(o.Arg.Offset, targetSizeInBytes) + targetSizeInBytes := int64(laneSize / 8) + offsetReg, err := c.compileMemoryAccessOffsetSetup(offset, targetSizeInBytes) if err != nil { return err } c.assembler.CompileVectorRegisterToRegister(arm64.UMOV, v.register, arm64ReservedRegisterForTemporary, arr, - arm64.VectorIndex(o.LaneIndex)) + arm64.VectorIndex(laneIndex)) c.assembler.CompileRegisterToMemoryWithRegisterOffset(storeInst, - arm64ReservedRegisterForTemporary, arm64ReservedRegisterForMemory, offset) + 
arm64ReservedRegisterForTemporary, arm64ReservedRegisterForMemory, offsetReg) c.locationStack.markRegisterUnused(v.register) return } // compileV128ExtractLane implements compiler.compileV128ExtractLane for arm64. -func (c *arm64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLane) (err error) { +func (c *arm64Compiler) compileV128ExtractLane(o *wazeroir.UnionOperation) (err error) { v := c.locationStack.popV128() if err = c.compileEnsureOnRegister(v); err != nil { return } - switch o.Shape { + shape := o.B1 + laneIndex := o.B2 + signed := o.B3 + switch shape { case wazeroir.ShapeI8x16: result, err := c.allocateRegister(registerTypeGeneralPurpose) if err != nil { return err } var inst asm.Instruction - if o.Signed { + if signed { inst = arm64.SMOV32 } else { inst = arm64.UMOV } c.assembler.CompileVectorRegisterToRegister(inst, v.register, result, - arm64.VectorArrangementB, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementB, arm64.VectorIndex(laneIndex)) c.locationStack.markRegisterUnused(v.register) c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI32) @@ -387,13 +404,13 @@ func (c *arm64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa return err } var inst asm.Instruction - if o.Signed { + if signed { inst = arm64.SMOV32 } else { inst = arm64.UMOV } c.assembler.CompileVectorRegisterToRegister(inst, v.register, result, - arm64.VectorArrangementH, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementH, arm64.VectorIndex(laneIndex)) c.locationStack.markRegisterUnused(v.register) c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI32) @@ -403,7 +420,7 @@ func (c *arm64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa return err } c.assembler.CompileVectorRegisterToRegister(arm64.UMOV, v.register, result, - arm64.VectorArrangementS, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementS, arm64.VectorIndex(laneIndex)) c.locationStack.markRegisterUnused(v.register) 
c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI32) @@ -413,24 +430,24 @@ func (c *arm64Compiler) compileV128ExtractLane(o wazeroir.OperationV128ExtractLa return err } c.assembler.CompileVectorRegisterToRegister(arm64.UMOV, v.register, result, - arm64.VectorArrangementD, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementD, arm64.VectorIndex(laneIndex)) c.locationStack.markRegisterUnused(v.register) c.pushRuntimeValueLocationOnRegister(result, runtimeValueTypeI64) case wazeroir.ShapeF32x4: c.assembler.CompileVectorRegisterToVectorRegister(arm64.INSELEM, v.register, v.register, - arm64.VectorArrangementS, arm64.VectorIndex(o.LaneIndex), 0) + arm64.VectorArrangementS, arm64.VectorIndex(laneIndex), 0) c.pushRuntimeValueLocationOnRegister(v.register, runtimeValueTypeF32) case wazeroir.ShapeF64x2: c.assembler.CompileVectorRegisterToVectorRegister(arm64.INSELEM, v.register, v.register, - arm64.VectorArrangementD, arm64.VectorIndex(o.LaneIndex), 0) + arm64.VectorArrangementD, arm64.VectorIndex(laneIndex), 0) c.pushRuntimeValueLocationOnRegister(v.register, runtimeValueTypeF64) } return } // compileV128ReplaceLane implements compiler.compileV128ReplaceLane for arm64. 
-func (c *arm64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLane) (err error) { +func (c *arm64Compiler) compileV128ReplaceLane(o *wazeroir.UnionOperation) (err error) { origin := c.locationStack.pop() if err = c.compileEnsureOnRegister(origin); err != nil { return @@ -441,25 +458,27 @@ func (c *arm64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLa return } - switch o.Shape { + shape := o.B1 + laneIndex := o.B2 + switch shape { case wazeroir.ShapeI8x16: c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, origin.register, vector.register, - arm64.VectorArrangementB, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementB, arm64.VectorIndex(laneIndex)) case wazeroir.ShapeI16x8: c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, origin.register, vector.register, - arm64.VectorArrangementH, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementH, arm64.VectorIndex(laneIndex)) case wazeroir.ShapeI32x4: c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, origin.register, vector.register, - arm64.VectorArrangementS, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementS, arm64.VectorIndex(laneIndex)) case wazeroir.ShapeI64x2: c.assembler.CompileRegisterToVectorRegister(arm64.INSGEN, origin.register, vector.register, - arm64.VectorArrangementD, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementD, arm64.VectorIndex(laneIndex)) case wazeroir.ShapeF32x4: c.assembler.CompileVectorRegisterToVectorRegister(arm64.INSELEM, origin.register, vector.register, - arm64.VectorArrangementS, 0, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementS, 0, arm64.VectorIndex(laneIndex)) case wazeroir.ShapeF64x2: c.assembler.CompileVectorRegisterToVectorRegister(arm64.INSELEM, origin.register, vector.register, - arm64.VectorArrangementD, 0, arm64.VectorIndex(o.LaneIndex)) + arm64.VectorArrangementD, 0, arm64.VectorIndex(laneIndex)) } c.locationStack.markRegisterUnused(origin.register) @@ -468,14 +487,15 @@ func (c 
*arm64Compiler) compileV128ReplaceLane(o wazeroir.OperationV128ReplaceLa } // compileV128Splat implements compiler.compileV128Splat for arm64. -func (c *arm64Compiler) compileV128Splat(o wazeroir.OperationV128Splat) (err error) { +func (c *arm64Compiler) compileV128Splat(o *wazeroir.UnionOperation) (err error) { origin := c.locationStack.pop() if err = c.compileEnsureOnRegister(origin); err != nil { return } var result asm.Register - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: result, err = c.allocateRegister(registerTypeVector) if err != nil { @@ -530,7 +550,7 @@ func (c *arm64Compiler) onValueReleaseRegisterToStack(reg asm.Register) { } // compileV128Shuffle implements compiler.compileV128Shuffle for arm64. -func (c *arm64Compiler) compileV128Shuffle(o wazeroir.OperationV128Shuffle) (err error) { +func (c *arm64Compiler) compileV128Shuffle(o *wazeroir.UnionOperation) (err error) { // Shuffle needs two operands (v, w) must be next to each other. // For simplicity, we use V29 for v and V30 for w values respectively. const vReg, wReg = arm64.RegV29, arm64.RegV30 @@ -575,7 +595,11 @@ func (c *arm64Compiler) compileV128Shuffle(o wazeroir.OperationV128Shuffle) (err return err } - c.assembler.CompileStaticConstToVectorRegister(arm64.VMOV, asm.NewStaticConst(o.Lanes[:]), result, arm64.VectorArrangementQ) + lanes := make([]byte, len(o.Us)) + for i, lane := range o.Us { + lanes[i] = byte(lane) + } + c.assembler.CompileStaticConstToVectorRegister(arm64.VMOV, asm.NewStaticConst(lanes), result, arm64.VectorArrangementQ) c.assembler.CompileVectorRegisterToVectorRegister(arm64.TBL2, vReg, result, arm64.VectorArrangement16B, arm64.VectorIndexNone, arm64.VectorIndexNone) @@ -585,7 +609,7 @@ func (c *arm64Compiler) compileV128Shuffle(o wazeroir.OperationV128Shuffle) (err } // compileV128Swizzle implements compiler.compileV128Swizzle for arm64. 
-func (c *arm64Compiler) compileV128Swizzle(wazeroir.OperationV128Swizzle) (err error) { +func (c *arm64Compiler) compileV128Swizzle(*wazeroir.UnionOperation) (err error) { indexVec := c.locationStack.popV128() if err = c.compileEnsureOnRegister(indexVec); err != nil { return @@ -604,7 +628,7 @@ func (c *arm64Compiler) compileV128Swizzle(wazeroir.OperationV128Swizzle) (err e } // compileV128AnyTrue implements compiler.compileV128AnyTrue for arm64. -func (c *arm64Compiler) compileV128AnyTrue(wazeroir.OperationV128AnyTrue) (err error) { +func (c *arm64Compiler) compileV128AnyTrue(*wazeroir.UnionOperation) (err error) { vector := c.locationStack.popV128() if err = c.compileEnsureOnRegister(vector); err != nil { return @@ -623,14 +647,15 @@ func (c *arm64Compiler) compileV128AnyTrue(wazeroir.OperationV128AnyTrue) (err e } // compileV128AllTrue implements compiler.compileV128AllTrue for arm64. -func (c *arm64Compiler) compileV128AllTrue(o wazeroir.OperationV128AllTrue) (err error) { +func (c *arm64Compiler) compileV128AllTrue(o *wazeroir.UnionOperation) (err error) { vector := c.locationStack.popV128() if err = c.compileEnsureOnRegister(vector); err != nil { return } v := vector.register - if o.Shape == wazeroir.ShapeI64x2 { + shape := o.B1 + if shape == wazeroir.ShapeI64x2 { c.assembler.CompileVectorRegisterToVectorRegister(arm64.CMEQZERO, arm64.RegRZR, v, arm64.VectorArrangement2D, arm64.VectorIndexNone, arm64.VectorIndexNone) c.assembler.CompileVectorRegisterToVectorRegister(arm64.ADDP, v, v, @@ -639,7 +664,7 @@ func (c *arm64Compiler) compileV128AllTrue(o wazeroir.OperationV128AllTrue) (err c.locationStack.pushRuntimeValueLocationOnConditionalRegister(arm64.CondEQ) } else { var arr arm64.VectorArrangement - switch o.Shape { + switch shape { case wazeroir.ShapeI8x16: arr = arm64.VectorArrangement16B case wazeroir.ShapeI16x8: @@ -675,7 +700,7 @@ var ( ) // compileV128BitMask implements compiler.compileV128BitMask for arm64. 
-func (c *arm64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) (err error) { +func (c *arm64Compiler) compileV128BitMask(o *wazeroir.UnionOperation) (err error) { vector := c.locationStack.popV128() if err = c.compileEnsureOnRegister(vector); err != nil { return @@ -688,7 +713,8 @@ func (c *arm64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) (err return err } - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16: vecTmp, err := c.allocateRegister(registerTypeVector) if err != nil { @@ -792,27 +818,27 @@ func (c *arm64Compiler) compileV128BitMask(o wazeroir.OperationV128BitMask) (err } // compileV128And implements compiler.compileV128And for arm64. -func (c *arm64Compiler) compileV128And(wazeroir.OperationV128And) error { +func (c *arm64Compiler) compileV128And(*wazeroir.UnionOperation) error { return c.compileV128x2BinOp(arm64.VAND, arm64.VectorArrangement16B) } // compileV128Not implements compiler.compileV128Not for arm64. -func (c *arm64Compiler) compileV128Not(wazeroir.OperationV128Not) error { +func (c *arm64Compiler) compileV128Not(*wazeroir.UnionOperation) error { return c.compileV128UniOp(arm64.NOT, arm64.VectorArrangement16B) } // compileV128Or implements compiler.compileV128Or for arm64. -func (c *arm64Compiler) compileV128Or(wazeroir.OperationV128Or) error { +func (c *arm64Compiler) compileV128Or(*wazeroir.UnionOperation) error { return c.compileV128x2BinOp(arm64.VORR, arm64.VectorArrangement16B) } // compileV128Xor implements compiler.compileV128Xor for arm64. -func (c *arm64Compiler) compileV128Xor(wazeroir.OperationV128Xor) error { +func (c *arm64Compiler) compileV128Xor(*wazeroir.UnionOperation) error { return c.compileV128x2BinOp(arm64.EOR, arm64.VectorArrangement16B) } // compileV128Bitselect implements compiler.compileV128Bitselect for arm64. 
-func (c *arm64Compiler) compileV128Bitselect(wazeroir.OperationV128Bitselect) error { +func (c *arm64Compiler) compileV128Bitselect(*wazeroir.UnionOperation) error { selector := c.locationStack.popV128() if err := c.compileEnsureOnRegister(selector); err != nil { return err @@ -837,7 +863,7 @@ func (c *arm64Compiler) compileV128Bitselect(wazeroir.OperationV128Bitselect) er } // compileV128AndNot implements compiler.compileV128AndNot for arm64. -func (c *arm64Compiler) compileV128AndNot(wazeroir.OperationV128AndNot) error { +func (c *arm64Compiler) compileV128AndNot(*wazeroir.UnionOperation) error { return c.compileV128x2BinOp(arm64.BIC, arm64.VectorArrangement16B) } @@ -872,19 +898,21 @@ func (c *arm64Compiler) compileV128x2BinOp(inst asm.Instruction, arr arm64.Vecto } // compileV128Shr implements compiler.compileV128Shr for arm64. -func (c *arm64Compiler) compileV128Shr(o wazeroir.OperationV128Shr) error { +func (c *arm64Compiler) compileV128Shr(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Signed { + shape := o.B1 + signed := o.B3 + if signed { inst = arm64.SSHL } else { inst = arm64.USHL } - return c.compileV128ShiftImpl(o.Shape, inst, true) + return c.compileV128ShiftImpl(shape, inst, true) } // compileV128Shl implements compiler.compileV128Shl for arm64. -func (c *arm64Compiler) compileV128Shl(o wazeroir.OperationV128Shl) error { - return c.compileV128ShiftImpl(o.Shape, arm64.SSHL, false) +func (c *arm64Compiler) compileV128Shl(o *wazeroir.UnionOperation) error { + return c.compileV128ShiftImpl(o.B1 /*shape*/, arm64.SSHL, false) } func (c *arm64Compiler) compileV128ShiftImpl(shape wazeroir.Shape, ins asm.Instruction, rightShift bool) error { @@ -945,7 +973,7 @@ func (c *arm64Compiler) compileV128ShiftImpl(shape wazeroir.Shape, ins asm.Instr } // compileV128Cmp implements compiler.compileV128Cmp for arm64. 
-func (c *arm64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { +func (c *arm64Compiler) compileV128Cmp(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -957,22 +985,23 @@ func (c *arm64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } var arr arm64.VectorArrangement - if o.Type <= wazeroir.V128CmpTypeI8x16GeU { + v128CmpType := o.B1 + if v128CmpType <= wazeroir.V128CmpTypeI8x16GeU { arr = arm64.VectorArrangement16B - } else if o.Type <= wazeroir.V128CmpTypeI16x8GeU { + } else if v128CmpType <= wazeroir.V128CmpTypeI16x8GeU { arr = arm64.VectorArrangement8H - } else if o.Type <= wazeroir.V128CmpTypeI32x4GeU { + } else if v128CmpType <= wazeroir.V128CmpTypeI32x4GeU { arr = arm64.VectorArrangement4S - } else if o.Type <= wazeroir.V128CmpTypeI64x2GeS { + } else if v128CmpType <= wazeroir.V128CmpTypeI64x2GeS { arr = arm64.VectorArrangement2D - } else if o.Type <= wazeroir.V128CmpTypeF32x4Ge { + } else if v128CmpType <= wazeroir.V128CmpTypeF32x4Ge { arr = arm64.VectorArrangement4S } else { // f64x2 arr = arm64.VectorArrangement2D } result := x1.register - switch o.Type { + switch v128CmpType { case wazeroir.V128CmpTypeI8x16Eq, wazeroir.V128CmpTypeI16x8Eq, wazeroir.V128CmpTypeI32x4Eq, wazeroir.V128CmpTypeI64x2Eq: c.assembler.CompileTwoVectorRegistersToVectorRegister(arm64.CMEQ, x1.register, x2.register, result, arr) case wazeroir.V128CmpTypeI8x16Ne, wazeroir.V128CmpTypeI16x8Ne, wazeroir.V128CmpTypeI32x4Ne, wazeroir.V128CmpTypeI64x2Ne: @@ -1019,34 +1048,39 @@ func (c *arm64Compiler) compileV128Cmp(o wazeroir.OperationV128Cmp) error { } // compileV128AddSat implements compiler.compileV128AddSat for arm64. 
-func (c *arm64Compiler) compileV128AddSat(o wazeroir.OperationV128AddSat) error { +func (c *arm64Compiler) compileV128AddSat(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Signed { + shape := o.B1 + signed := o.B3 + if signed { inst = arm64.VSQADD } else { inst = arm64.VUQADD } - return c.compileV128x2BinOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128x2BinOp(inst, defaultArrangementForShape(shape)) } // compileV128SubSat implements compiler.compileV128SubSat for arm64. -func (c *arm64Compiler) compileV128SubSat(o wazeroir.OperationV128SubSat) error { +func (c *arm64Compiler) compileV128SubSat(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Signed { + shape := o.B1 + signed := o.B3 + if signed { inst = arm64.VSQSUB } else { inst = arm64.VUQSUB } - return c.compileV128x2BinOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128x2BinOp(inst, defaultArrangementForShape(shape)) } // compileV128Mul implements compiler.compileV128Mul for arm64. -func (c *arm64Compiler) compileV128Mul(o wazeroir.OperationV128Mul) (err error) { - switch o.Shape { +func (c *arm64Compiler) compileV128Mul(o *wazeroir.UnionOperation) (err error) { + shape := o.B1 + switch shape { case wazeroir.ShapeI8x16, wazeroir.ShapeI16x8, wazeroir.ShapeI32x4: - err = c.compileV128x2BinOp(arm64.VMUL, defaultArrangementForShape(o.Shape)) + err = c.compileV128x2BinOp(arm64.VMUL, defaultArrangementForShape(shape)) case wazeroir.ShapeF32x4, wazeroir.ShapeF64x2: - err = c.compileV128x2BinOp(arm64.VFMUL, defaultArrangementForShape(o.Shape)) + err = c.compileV128x2BinOp(arm64.VFMUL, defaultArrangementForShape(shape)) case wazeroir.ShapeI64x2: x2 := c.locationStack.popV128() if err = c.compileEnsureOnRegister(x2); err != nil { @@ -1105,10 +1139,11 @@ func (c *arm64Compiler) compileV128Mul(o wazeroir.OperationV128Mul) (err error) } // compileV128Div implements compiler.compileV128Div for arm64. 
-func (c *arm64Compiler) compileV128Div(o wazeroir.OperationV128Div) error { +func (c *arm64Compiler) compileV128Div(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement var inst asm.Instruction - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S inst = arm64.VFDIV @@ -1120,20 +1155,22 @@ func (c *arm64Compiler) compileV128Div(o wazeroir.OperationV128Div) error { } // compileV128Neg implements compiler.compileV128Neg for arm64. -func (c *arm64Compiler) compileV128Neg(o wazeroir.OperationV128Neg) error { +func (c *arm64Compiler) compileV128Neg(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Shape <= wazeroir.ShapeI64x2 { // Integer lanes + shape := o.B1 + if shape <= wazeroir.ShapeI64x2 { // Integer lanes inst = arm64.VNEG } else { // Floating point lanes inst = arm64.VFNEG } - return c.compileV128UniOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128UniOp(inst, defaultArrangementForShape(shape)) } // compileV128Sqrt implements compiler.compileV128Sqrt for arm64. -func (c *arm64Compiler) compileV128Sqrt(o wazeroir.OperationV128Sqrt) error { +func (c *arm64Compiler) compileV128Sqrt(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S case wazeroir.ShapeF64x2: @@ -1143,26 +1180,29 @@ func (c *arm64Compiler) compileV128Sqrt(o wazeroir.OperationV128Sqrt) error { } // compileV128Abs implements compiler.compileV128Abs for arm64. 
-func (c *arm64Compiler) compileV128Abs(o wazeroir.OperationV128Abs) error { +func (c *arm64Compiler) compileV128Abs(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Shape <= wazeroir.ShapeI64x2 { // Integer lanes + shape := o.B1 + if shape <= wazeroir.ShapeI64x2 { // Integer lanes inst = arm64.VABS } else { // Floating point lanes inst = arm64.VFABS } - return c.compileV128UniOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128UniOp(inst, defaultArrangementForShape(shape)) } // compileV128Popcnt implements compiler.compileV128Popcnt for arm64. -func (c *arm64Compiler) compileV128Popcnt(o wazeroir.OperationV128Popcnt) error { - return c.compileV128UniOp(arm64.VCNT, defaultArrangementForShape(o.Shape)) +func (c *arm64Compiler) compileV128Popcnt(o *wazeroir.UnionOperation) error { + return c.compileV128UniOp(arm64.VCNT, defaultArrangementForShape(o.B1)) } // compileV128Min implements compiler.compileV128Min for arm64. -func (c *arm64Compiler) compileV128Min(o wazeroir.OperationV128Min) error { +func (c *arm64Compiler) compileV128Min(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Shape <= wazeroir.ShapeI64x2 { // Integer lanes - if o.Signed { + shape := o.B1 + signed := o.B3 + if shape <= wazeroir.ShapeI64x2 { // Integer lanes + if signed { inst = arm64.SMIN } else { inst = arm64.UMIN @@ -1170,7 +1210,7 @@ func (c *arm64Compiler) compileV128Min(o wazeroir.OperationV128Min) error { } else { // Floating point lanes inst = arm64.VFMIN } - return c.compileV128x2BinOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128x2BinOp(inst, defaultArrangementForShape(shape)) } func defaultArrangementForShape(s wazeroir.Shape) (arr arm64.VectorArrangement) { @@ -1192,10 +1232,12 @@ func defaultArrangementForShape(s wazeroir.Shape) (arr arm64.VectorArrangement) } // compileV128Max implements compiler.compileV128Max for arm64. 
-func (c *arm64Compiler) compileV128Max(o wazeroir.OperationV128Max) error { +func (c *arm64Compiler) compileV128Max(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Shape <= wazeroir.ShapeI64x2 { // Integer lanes - if o.Signed { + shape := o.B1 + signed := o.B3 + if shape <= wazeroir.ShapeI64x2 { // Integer lanes + if signed { inst = arm64.SMAX } else { inst = arm64.UMAX @@ -1203,22 +1245,22 @@ func (c *arm64Compiler) compileV128Max(o wazeroir.OperationV128Max) error { } else { // Floating point lanes inst = arm64.VFMAX } - return c.compileV128x2BinOp(inst, defaultArrangementForShape(o.Shape)) + return c.compileV128x2BinOp(inst, defaultArrangementForShape(shape)) } // compileV128AvgrU implements compiler.compileV128AvgrU for arm64. -func (c *arm64Compiler) compileV128AvgrU(o wazeroir.OperationV128AvgrU) error { - return c.compileV128x2BinOp(arm64.URHADD, defaultArrangementForShape(o.Shape)) +func (c *arm64Compiler) compileV128AvgrU(o *wazeroir.UnionOperation) error { + return c.compileV128x2BinOp(arm64.URHADD, defaultArrangementForShape(o.B1)) } // compileV128Pmin implements compiler.compileV128Pmin for arm64. -func (c *arm64Compiler) compileV128Pmin(o wazeroir.OperationV128Pmin) error { - return c.compileV128PseudoMinOrMax(defaultArrangementForShape(o.Shape), false) +func (c *arm64Compiler) compileV128Pmin(o *wazeroir.UnionOperation) error { + return c.compileV128PseudoMinOrMax(defaultArrangementForShape(o.B1), false) } // compileV128Pmax implements compiler.compileV128Pmax for arm64. -func (c *arm64Compiler) compileV128Pmax(o wazeroir.OperationV128Pmax) error { - return c.compileV128PseudoMinOrMax(defaultArrangementForShape(o.Shape), true) +func (c *arm64Compiler) compileV128Pmax(o *wazeroir.UnionOperation) error { + return c.compileV128PseudoMinOrMax(defaultArrangementForShape(o.B1), true) } // compileV128PseudoMinOrMax implements compileV128Pmax and compileV128Pmin. 
@@ -1255,9 +1297,10 @@ func (c *arm64Compiler) compileV128PseudoMinOrMax(arr arm64.VectorArrangement, m } // compileV128Ceil implements compiler.compileV128Ceil for arm64. -func (c *arm64Compiler) compileV128Ceil(o wazeroir.OperationV128Ceil) error { +func (c *arm64Compiler) compileV128Ceil(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S case wazeroir.ShapeF64x2: @@ -1267,9 +1310,10 @@ func (c *arm64Compiler) compileV128Ceil(o wazeroir.OperationV128Ceil) error { } // compileV128Floor implements compiler.compileV128Floor for arm64. -func (c *arm64Compiler) compileV128Floor(o wazeroir.OperationV128Floor) error { +func (c *arm64Compiler) compileV128Floor(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S case wazeroir.ShapeF64x2: @@ -1279,9 +1323,10 @@ func (c *arm64Compiler) compileV128Floor(o wazeroir.OperationV128Floor) error { } // compileV128Trunc implements compiler.compileV128Trunc for arm64. -func (c *arm64Compiler) compileV128Trunc(o wazeroir.OperationV128Trunc) error { +func (c *arm64Compiler) compileV128Trunc(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S case wazeroir.ShapeF64x2: @@ -1291,9 +1336,10 @@ func (c *arm64Compiler) compileV128Trunc(o wazeroir.OperationV128Trunc) error { } // compileV128Nearest implements compiler.compileV128Nearest for arm64. 
-func (c *arm64Compiler) compileV128Nearest(o wazeroir.OperationV128Nearest) error { +func (c *arm64Compiler) compileV128Nearest(o *wazeroir.UnionOperation) error { var arr arm64.VectorArrangement - switch o.Shape { + shape := o.B1 + switch shape { case wazeroir.ShapeF32x4: arr = arm64.VectorArrangement4S case wazeroir.ShapeF64x2: @@ -1303,17 +1349,20 @@ func (c *arm64Compiler) compileV128Nearest(o wazeroir.OperationV128Nearest) erro } // compileV128Extend implements compiler.compileV128Extend for arm64. -func (c *arm64Compiler) compileV128Extend(o wazeroir.OperationV128Extend) error { +func (c *arm64Compiler) compileV128Extend(o *wazeroir.UnionOperation) error { var inst asm.Instruction var arr arm64.VectorArrangement - if o.UseLow { - if o.Signed { + originShape := o.B1 + signed := o.B2 == 1 + useLow := o.B3 + if useLow { + if signed { inst = arm64.SSHLL } else { inst = arm64.USHLL } - switch o.OriginShape { + switch originShape { case wazeroir.ShapeI8x16: arr = arm64.VectorArrangement8B case wazeroir.ShapeI16x8: @@ -1322,29 +1371,32 @@ func (c *arm64Compiler) compileV128Extend(o wazeroir.OperationV128Extend) error arr = arm64.VectorArrangement2S } } else { - if o.Signed { + if signed { inst = arm64.SSHLL2 } else { inst = arm64.USHLL2 } - arr = defaultArrangementForShape(o.OriginShape) + arr = defaultArrangementForShape(originShape) } return c.compileV128UniOp(inst, arr) } // compileV128ExtMul implements compiler.compileV128ExtMul for arm64. 
-func (c *arm64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error { +func (c *arm64Compiler) compileV128ExtMul(o *wazeroir.UnionOperation) error { var inst asm.Instruction var arr arm64.VectorArrangement - if o.UseLow { - if o.Signed { + originShape := o.B1 + signed := o.B2 == 1 + useLow := o.B3 + if useLow { + if signed { inst = arm64.SMULL } else { inst = arm64.UMULL } - switch o.OriginShape { + switch originShape { case wazeroir.ShapeI8x16: arr = arm64.VectorArrangement8B case wazeroir.ShapeI16x8: @@ -1353,50 +1405,55 @@ func (c *arm64Compiler) compileV128ExtMul(o wazeroir.OperationV128ExtMul) error arr = arm64.VectorArrangement2S } } else { - if o.Signed { + if signed { inst = arm64.SMULL2 } else { inst = arm64.UMULL2 } - arr = defaultArrangementForShape(o.OriginShape) + arr = defaultArrangementForShape(originShape) } return c.compileV128x2BinOp(inst, arr) } // compileV128Q15mulrSatS implements compiler.compileV128Q15mulrSatS for arm64. -func (c *arm64Compiler) compileV128Q15mulrSatS(wazeroir.OperationV128Q15mulrSatS) error { +func (c *arm64Compiler) compileV128Q15mulrSatS(*wazeroir.UnionOperation) error { return c.compileV128x2BinOp(arm64.SQRDMULH, arm64.VectorArrangement8H) } // compileV128ExtAddPairwise implements compiler.compileV128ExtAddPairwise for arm64. -func (c *arm64Compiler) compileV128ExtAddPairwise(o wazeroir.OperationV128ExtAddPairwise) error { +func (c *arm64Compiler) compileV128ExtAddPairwise(o *wazeroir.UnionOperation) error { var inst asm.Instruction - if o.Signed { + originShape := o.B1 + signed := o.B3 + if signed { inst = arm64.SADDLP } else { inst = arm64.UADDLP } - return c.compileV128UniOp(inst, defaultArrangementForShape(o.OriginShape)) + return c.compileV128UniOp(inst, defaultArrangementForShape(originShape)) } // compileV128FloatPromote implements compiler.compileV128FloatPromote for arm64. 
-func (c *arm64Compiler) compileV128FloatPromote(wazeroir.OperationV128FloatPromote) error { +func (c *arm64Compiler) compileV128FloatPromote(*wazeroir.UnionOperation) error { return c.compileV128UniOp(arm64.FCVTL, arm64.VectorArrangement2S) } // compileV128FloatDemote implements compiler.compileV128FloatDemote for arm64. -func (c *arm64Compiler) compileV128FloatDemote(wazeroir.OperationV128FloatDemote) error { +func (c *arm64Compiler) compileV128FloatDemote(*wazeroir.UnionOperation) error { return c.compileV128UniOp(arm64.FCVTN, arm64.VectorArrangement2S) } // compileV128FConvertFromI implements compiler.compileV128FConvertFromI for arm64. -func (c *arm64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConvertFromI) (err error) { - if o.DestinationShape == wazeroir.ShapeF32x4 { - if o.Signed { - err = c.compileV128UniOp(arm64.VSCVTF, defaultArrangementForShape(o.DestinationShape)) +func (c *arm64Compiler) compileV128FConvertFromI(o *wazeroir.UnionOperation) (err error) { + destinationShape := o.B1 + signed := o.B3 + + if destinationShape == wazeroir.ShapeF32x4 { + if signed { + err = c.compileV128UniOp(arm64.VSCVTF, defaultArrangementForShape(destinationShape)) } else { - err = c.compileV128UniOp(arm64.VUCVTF, defaultArrangementForShape(o.DestinationShape)) + err = c.compileV128UniOp(arm64.VUCVTF, defaultArrangementForShape(destinationShape)) } return } else { // f64x2 @@ -1407,7 +1464,7 @@ func (c *arm64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConver vr := v.register var expand, convert asm.Instruction - if o.Signed { + if signed { expand, convert = arm64.SSHLL, arm64.VSCVTF } else { expand, convert = arm64.USHLL, arm64.VUCVTF @@ -1424,7 +1481,7 @@ func (c *arm64Compiler) compileV128FConvertFromI(o wazeroir.OperationV128FConver } // compileV128Dot implements compiler.compileV128Dot for arm64. 
-func (c *arm64Compiler) compileV128Dot(wazeroir.OperationV128Dot) error { +func (c *arm64Compiler) compileV128Dot(*wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1456,7 +1513,7 @@ func (c *arm64Compiler) compileV128Dot(wazeroir.OperationV128Dot) error { } // compileV128Narrow implements compiler.compileV128Narrow for arm64. -func (c *arm64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error { +func (c *arm64Compiler) compileV128Narrow(o *wazeroir.UnionOperation) error { x2 := c.locationStack.popV128() if err := c.compileEnsureOnRegister(x2); err != nil { return err @@ -1470,7 +1527,9 @@ func (c *arm64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error x1r, x2r := x1.register, x2.register var arr, arr2 arm64.VectorArrangement - switch o.OriginShape { + originShape := o.B1 + signed := o.B3 + switch originShape { case wazeroir.ShapeI16x8: arr = arm64.VectorArrangement8B arr2 = arm64.VectorArrangement16B @@ -1480,7 +1539,7 @@ func (c *arm64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error } var lo, hi asm.Instruction - if o.Signed { + if signed { lo, hi = arm64.SQXTN, arm64.SQXTN2 } else { lo, hi = arm64.SQXTUN, arm64.SQXTUN2 @@ -1497,26 +1556,28 @@ func (c *arm64Compiler) compileV128Narrow(o wazeroir.OperationV128Narrow) error } // compileV128ITruncSatFromF implements compiler.compileV128ITruncSatFromF for arm64. 
-func (c *arm64Compiler) compileV128ITruncSatFromF(o wazeroir.OperationV128ITruncSatFromF) (err error) { +func (c *arm64Compiler) compileV128ITruncSatFromF(o *wazeroir.UnionOperation) (err error) { v := c.locationStack.popV128() if err = c.compileEnsureOnRegister(v); err != nil { return err } + originShape := o.B1 + signed := o.B3 var cvt asm.Instruction - if o.Signed { + if signed { cvt = arm64.VFCVTZS } else { cvt = arm64.VFCVTZU } c.assembler.CompileVectorRegisterToVectorRegister(cvt, v.register, v.register, - defaultArrangementForShape(o.OriginShape), arm64.VectorIndexNone, arm64.VectorIndexNone, + defaultArrangementForShape(originShape), arm64.VectorIndexNone, arm64.VectorIndexNone, ) - if o.OriginShape == wazeroir.ShapeF64x2 { + if originShape == wazeroir.ShapeF64x2 { var narrow asm.Instruction - if o.Signed { + if signed { narrow = arm64.SQXTN } else { narrow = arm64.UQXTN diff --git a/internal/engine/compiler/impl_vec_arm64_test.go b/internal/engine/compiler/impl_vec_arm64_test.go index d14c17a1..0322faba 100644 --- a/internal/engine/compiler/impl_vec_arm64_test.go +++ b/internal/engine/compiler/impl_vec_arm64_test.go @@ -21,7 +21,7 @@ func TestArm64Compiler_V128Shuffle_ConstTable_MiddleOfFunction(t *testing.T) { err := compiler.compilePreamble() require.NoError(t, err) - lanes := [16]byte{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0} + lanes := []uint64{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0} v := [16]byte{0: 0xa, 1: 0xb, 10: 0xc} w := [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} exp := [16]byte{ @@ -31,19 +31,13 @@ func TestArm64Compiler_V128Shuffle_ConstTable_MiddleOfFunction(t *testing.T) { 0xa, 0xa, 0xa, 0xa, } - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(v[:8]), - Hi: binary.LittleEndian.Uint64(v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(v[:8]), 
binary.LittleEndian.Uint64(v[8:])))) require.NoError(t, err) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(w[:8]), - Hi: binary.LittleEndian.Uint64(w[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(w[:8]), binary.LittleEndian.Uint64(w[8:])))) require.NoError(t, err) - err = compiler.compileV128Shuffle(wazeroir.OperationV128Shuffle{Lanes: lanes}) + err = compiler.compileV128Shuffle(operationPtr(wazeroir.NewOperationV128Shuffle(lanes))) require.NoError(t, err) assembler := compiler.(*arm64Compiler).assembler.(*arm64.AssemblerImpl) @@ -113,10 +107,7 @@ func TestArm64Compiler_V128Shuffle_combinations(t *testing.T) { vReg: arm64.RegV30, // will be moved to v29. init: func(t *testing.T, c *arm64Compiler) { // Set up the previous value on the v3 register. - err := c.compileV128Const(wazeroir.OperationV128Const{ - Lo: 1234, - Hi: 5678, - }) + err := c.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(1234, 5678))) require.NoError(t, err) movValueRegisterToRegister(t, c, c.locationStack.peek(), arm64.RegV29) }, @@ -134,10 +125,7 @@ func TestArm64Compiler_V128Shuffle_combinations(t *testing.T) { vReg: arm64.RegV12, // will be moved to v29. init: func(t *testing.T, c *arm64Compiler) { // Set up the previous value on the v3 register. 
- err := c.compileV128Const(wazeroir.OperationV128Const{ - Lo: 1234, - Hi: 5678, - }) + err := c.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(1234, 5678))) require.NoError(t, err) movValueRegisterToRegister(t, c, c.locationStack.peek(), arm64.RegV30) }, @@ -151,7 +139,7 @@ func TestArm64Compiler_V128Shuffle_combinations(t *testing.T) { }, } - lanes := [16]byte{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 31} + lanes := []uint64{1, 1, 1, 1, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 31} v := [16]byte{0: 0xa, 1: 0xb, 10: 0xc} w := [16]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 1} exp := [16]byte{ @@ -174,25 +162,19 @@ func TestArm64Compiler_V128Shuffle_combinations(t *testing.T) { ac := compiler.(*arm64Compiler) tc.init(t, ac) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(v[:8]), - Hi: binary.LittleEndian.Uint64(v[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(v[:8]), binary.LittleEndian.Uint64(v[8:])))) require.NoError(t, err) vLocation := compiler.runtimeValueLocationStack().peek() movValueRegisterToRegister(t, ac, vLocation, tc.vReg) - err = compiler.compileV128Const(wazeroir.OperationV128Const{ - Lo: binary.LittleEndian.Uint64(w[:8]), - Hi: binary.LittleEndian.Uint64(w[8:]), - }) + err = compiler.compileV128Const(operationPtr(wazeroir.NewOperationV128Const(binary.LittleEndian.Uint64(w[:8]), binary.LittleEndian.Uint64(w[8:])))) require.NoError(t, err) wLocation := compiler.runtimeValueLocationStack().peek() movValueRegisterToRegister(t, ac, wLocation, tc.wReg) - err = compiler.compileV128Shuffle(wazeroir.OperationV128Shuffle{Lanes: lanes}) + err = compiler.compileV128Shuffle(operationPtr(wazeroir.NewOperationV128Shuffle(lanes))) require.NoError(t, err) requireRuntimeLocationStackPointerEqual(t, tc.expStackPointerAfterShuffle, compiler) diff --git 
a/internal/engine/interpreter/interpreter.go b/internal/engine/interpreter/interpreter.go index d95b8f79..0d58808c 100644 --- a/internal/engine/interpreter/interpreter.go +++ b/internal/engine/interpreter/interpreter.go @@ -274,108 +274,38 @@ func (e *engine) lowerIR(ir *wazeroir.CompilationResult) (*code, error) { hasSourcePCs := len(ir.IROperationSourceOffsetsInWasmBinary) > 0 ops := ir.Operations ret := &code{} - labelAddress := map[wazeroir.LabelID]uint64{} - onLabelAddressResolved := map[wazeroir.LabelID][]func(addr uint64){} - for i, original := range ops { - var op *wazeroir.UnionOperation - if o, ok := original.(wazeroir.UnionOperation); ok { - op = &o - } else { - op = &wazeroir.UnionOperation{OpKind: original.Kind()} - } + labelAddress := map[wazeroir.Label]uint64{} + onLabelAddressResolved := map[wazeroir.Label][]func(addr uint64){} + for i := range ops { + op := &ops[i] if hasSourcePCs { op.SourcePC = ir.IROperationSourceOffsetsInWasmBinary[i] } - switch o := original.(type) { - case wazeroir.UnionOperation: - // Nullary operations don't need any further processing. 
- switch o.Kind() { - case wazeroir.OperationKindCall: - case wazeroir.OperationKindCallIndirect: - - case wazeroir.OperationKindSelect: - case wazeroir.OperationKindPick: - case wazeroir.OperationKindSet: - case wazeroir.OperationKindGlobalGet: - case wazeroir.OperationKindGlobalSet: - case wazeroir.OperationKindLoad: - case wazeroir.OperationKindLoad8: - case wazeroir.OperationKindLoad16: - case wazeroir.OperationKindLoad32: - case wazeroir.OperationKindStore: - case wazeroir.OperationKindStore8: - case wazeroir.OperationKindStore16: - case wazeroir.OperationKindStore32: - - case wazeroir.OperationKindConstI32: - case wazeroir.OperationKindConstI64: - case wazeroir.OperationKindConstF32: - case wazeroir.OperationKindConstF64: - case wazeroir.OperationKindEq: - case wazeroir.OperationKindNe: - case wazeroir.OperationKindEqz: - case wazeroir.OperationKindLt: - case wazeroir.OperationKindGt: - case wazeroir.OperationKindLe: - case wazeroir.OperationKindGe: - case wazeroir.OperationKindAdd: - case wazeroir.OperationKindSub: - case wazeroir.OperationKindMul: - case wazeroir.OperationKindClz: - case wazeroir.OperationKindCtz: - case wazeroir.OperationKindPopcnt: - case wazeroir.OperationKindDiv: - case wazeroir.OperationKindRem: - case wazeroir.OperationKindAnd: - case wazeroir.OperationKindOr: - case wazeroir.OperationKindXor: - case wazeroir.OperationKindShl: - case wazeroir.OperationKindShr: - case wazeroir.OperationKindRotl: - case wazeroir.OperationKindRotr: - case wazeroir.OperationKindAbs: - case wazeroir.OperationKindNeg: - case wazeroir.OperationKindCeil: - case wazeroir.OperationKindFloor: - case wazeroir.OperationKindTrunc: - case wazeroir.OperationKindNearest: - case wazeroir.OperationKindSqrt: - case wazeroir.OperationKindMin: - case wazeroir.OperationKindMax: - case wazeroir.OperationKindCopysign: - - case wazeroir.OperationKindI32ReinterpretFromF32, - wazeroir.OperationKindI64ReinterpretFromF64, - wazeroir.OperationKindF32ReinterpretFromI32, - 
wazeroir.OperationKindF64ReinterpretFromI64: - // Reinterpret ops are essentially nop for engine mode - // because we treat all values as uint64, and Reinterpret* is only used at module - // validation phase where we check type soundness of all the operations. - // So just eliminate the ops. - continue - } - case wazeroir.OperationLabel: - labelID := o.Label.ID() + // Nullary operations don't need any further processing. + switch op.Kind { + case wazeroir.OperationKindLabel: + label := wazeroir.Label(op.U1) address := uint64(len(ret.body)) - labelAddress[labelID] = address - for _, cb := range onLabelAddressResolved[labelID] { + labelAddress[label] = address + for _, cb := range onLabelAddressResolved[label] { cb(address) } - delete(onLabelAddressResolved, labelID) + delete(onLabelAddressResolved, label) // We just ignore the label operation // as we translate branch operations to the direct address jmp. continue - case wazeroir.OperationBr: - if o.Target.IsReturnTarget() { + + case wazeroir.OperationKindBr: + label := wazeroir.Label(op.U1) + if label.IsReturnTarget() { // Jmp to the end of the possible binary. op.U1 = math.MaxUint64 } else { - labelID := o.Target.ID() - addr, ok := labelAddress[labelID] + addr, ok := labelAddress[label] if !ok { // If this is the forward jump (e.g. to the continuation of if, etc.), // the target is not emitted yet, so resolve the address later. 
- onLabelAddressResolved[labelID] = append(onLabelAddressResolved[labelID], + onLabelAddressResolved[label] = append(onLabelAddressResolved[label], func(addr uint64) { op.U1 = addr }, @@ -384,22 +314,20 @@ func (e *engine) lowerIR(ir *wazeroir.CompilationResult) (*code, error) { op.U1 = addr } } - case wazeroir.OperationBrIf: - op.Rs = make([]*wazeroir.InclusiveRange, 2) - op.Us = make([]uint64, 2) - for i, target := range []wazeroir.BranchTargetDrop{o.Then, o.Else} { - op.Rs[i] = target.ToDrop - if target.Target.IsReturnTarget() { + + case wazeroir.OperationKindBrIf: + for i := 0; i < 2; i++ { + label := wazeroir.Label(op.Us[i]) + if label.IsReturnTarget() { // Jmp to the end of the possible binary. op.Us[i] = math.MaxUint64 } else { - labelID := target.Target.ID() - addr, ok := labelAddress[labelID] + addr, ok := labelAddress[label] if !ok { i := i // If this is the forward jump (e.g. to the continuation of if, etc.), // the target is not emitted yet, so resolve the address later. - onLabelAddressResolved[labelID] = append(onLabelAddressResolved[labelID], + onLabelAddressResolved[label] = append(onLabelAddressResolved[label], func(addr uint64) { op.Us[i] = addr }, @@ -409,23 +337,20 @@ func (e *engine) lowerIR(ir *wazeroir.CompilationResult) (*code, error) { } } } - case wazeroir.OperationBrTable: - targets := append([]*wazeroir.BranchTargetDrop{o.Default}, o.Targets...) - op.Rs = make([]*wazeroir.InclusiveRange, len(targets)) - op.Us = make([]uint64, len(targets)) - for i, target := range targets { - op.Rs[i] = target.ToDrop - if target.Target.IsReturnTarget() { + + case wazeroir.OperationKindBrTable: + for i, target := range op.Us { + label := wazeroir.Label(target) + if label.IsReturnTarget() { // Jmp to the end of the possible binary. op.Us[i] = math.MaxUint64 } else { - labelID := target.Target.ID() - addr, ok := labelAddress[labelID] + addr, ok := labelAddress[label] if !ok { i := i // pin index for later resolution // If this is the forward jump (e.g. 
to the continuation of if, etc.), // the target is not emitted yet, so resolve the address later. - onLabelAddressResolved[labelID] = append(onLabelAddressResolved[labelID], + onLabelAddressResolved[label] = append(onLabelAddressResolved[label], func(addr uint64) { op.Us[i] = addr }, @@ -435,175 +360,23 @@ func (e *engine) lowerIR(ir *wazeroir.CompilationResult) (*code, error) { } } } - case wazeroir.OperationDrop: - op.Rs = make([]*wazeroir.InclusiveRange, 1) - op.Rs[0] = o.Depth - - case wazeroir.OperationITruncFromF: - op.B1 = byte(o.InputType) - op.B2 = byte(o.OutputType) - op.B3 = o.NonTrapping - case wazeroir.OperationFConvertFromI: - op.B1 = byte(o.InputType) - op.B2 = byte(o.OutputType) - case wazeroir.OperationExtend: - if o.Signed { - op.B1 = 1 - } - case wazeroir.OperationMemoryInit: - op.U1 = uint64(o.DataIndex) - case wazeroir.OperationDataDrop: - op.U1 = uint64(o.DataIndex) - case wazeroir.OperationTableInit: - op.U1 = uint64(o.ElemIndex) - op.U2 = uint64(o.TableIndex) - case wazeroir.OperationElemDrop: - op.U1 = uint64(o.ElemIndex) - case wazeroir.OperationTableCopy: - op.U1 = uint64(o.SrcTableIndex) - op.U2 = uint64(o.DstTableIndex) - case wazeroir.OperationRefFunc: - op.U1 = uint64(o.FunctionIndex) - case wazeroir.OperationTableGet: - op.U1 = uint64(o.TableIndex) - case wazeroir.OperationTableSet: - op.U1 = uint64(o.TableIndex) - case wazeroir.OperationTableSize: - op.U1 = uint64(o.TableIndex) - case wazeroir.OperationTableGrow: - op.U1 = uint64(o.TableIndex) - case wazeroir.OperationTableFill: - op.U1 = uint64(o.TableIndex) - case wazeroir.OperationV128Const: - op.U1 = o.Lo - op.U2 = o.Hi - case wazeroir.OperationV128Add: - op.B1 = o.Shape - case wazeroir.OperationV128Sub: - op.B1 = o.Shape - case wazeroir.OperationV128Load: - op.B1 = o.Type - op.U1 = uint64(o.Arg.Alignment) - op.U2 = uint64(o.Arg.Offset) - case wazeroir.OperationV128LoadLane: - op.B1 = o.LaneSize - op.B2 = o.LaneIndex - op.U1 = uint64(o.Arg.Alignment) - op.U2 = 
uint64(o.Arg.Offset) - case wazeroir.OperationV128Store: - op.U1 = uint64(o.Arg.Alignment) - op.U2 = uint64(o.Arg.Offset) - case wazeroir.OperationV128StoreLane: - op.B1 = o.LaneSize - op.B2 = o.LaneIndex - op.U1 = uint64(o.Arg.Alignment) - op.U2 = uint64(o.Arg.Offset) - case wazeroir.OperationV128ExtractLane: - op.B1 = o.Shape - op.B2 = o.LaneIndex - op.B3 = o.Signed - case wazeroir.OperationV128ReplaceLane: - op.B1 = o.Shape - op.B2 = o.LaneIndex - case wazeroir.OperationV128Splat: - op.B1 = o.Shape - case wazeroir.OperationV128Shuffle: - op.Us = make([]uint64, 16) - for i, l := range o.Lanes { - op.Us[i] = uint64(l) - } - case wazeroir.OperationV128Swizzle: - case wazeroir.OperationV128AnyTrue: - case wazeroir.OperationV128AllTrue: - op.B1 = o.Shape - case wazeroir.OperationV128BitMask: - op.B1 = o.Shape - case wazeroir.OperationV128And: - case wazeroir.OperationV128Not: - case wazeroir.OperationV128Or: - case wazeroir.OperationV128Xor: - case wazeroir.OperationV128Bitselect: - case wazeroir.OperationV128AndNot: - case wazeroir.OperationV128Shr: - op.B1 = o.Shape - op.B3 = o.Signed - case wazeroir.OperationV128Shl: - op.B1 = o.Shape - case wazeroir.OperationV128Cmp: - op.B1 = o.Type - case wazeroir.OperationV128AddSat: - op.B1 = o.Shape - op.B3 = o.Signed - case wazeroir.OperationV128SubSat: - op.B1 = o.Shape - op.B3 = o.Signed - case wazeroir.OperationV128Mul: - op.B1 = o.Shape - case wazeroir.OperationV128Div: - op.B1 = o.Shape - case wazeroir.OperationV128Neg: - op.B1 = o.Shape - case wazeroir.OperationV128Sqrt: - op.B1 = o.Shape - case wazeroir.OperationV128Abs: - op.B1 = o.Shape - case wazeroir.OperationV128Popcnt: - case wazeroir.OperationV128Min: - op.B1 = o.Shape - op.B3 = o.Signed - case wazeroir.OperationV128Max: - op.B1 = o.Shape - op.B3 = o.Signed - case wazeroir.OperationV128AvgrU: - op.B1 = o.Shape - case wazeroir.OperationV128Pmin: - op.B1 = o.Shape - case wazeroir.OperationV128Pmax: - op.B1 = o.Shape - case wazeroir.OperationV128Ceil: - op.B1 = 
o.Shape - case wazeroir.OperationV128Floor: - op.B1 = o.Shape - case wazeroir.OperationV128Trunc: - op.B1 = o.Shape - case wazeroir.OperationV128Nearest: - op.B1 = o.Shape - case wazeroir.OperationV128Extend: - op.B1 = o.OriginShape - if o.Signed { - op.B2 = 1 - } - op.B3 = o.UseLow - case wazeroir.OperationV128ExtMul: - op.B1 = o.OriginShape - if o.Signed { - op.B2 = 1 - } - op.B3 = o.UseLow - case wazeroir.OperationV128Q15mulrSatS: - case wazeroir.OperationV128ExtAddPairwise: - op.B1 = o.OriginShape - op.B3 = o.Signed - case wazeroir.OperationV128FloatPromote: - case wazeroir.OperationV128FloatDemote: - case wazeroir.OperationV128FConvertFromI: - op.B1 = o.DestinationShape - op.B3 = o.Signed - case wazeroir.OperationV128Dot: - case wazeroir.OperationV128Narrow: - op.B1 = o.OriginShape - op.B3 = o.Signed - case wazeroir.OperationV128ITruncSatFromF: - op.B1 = o.OriginShape - op.B3 = o.Signed - default: - panic(fmt.Errorf("BUG: unimplemented operation %s", op.Kind().String())) + case wazeroir.OperationKindV128ITruncSatFromF: + case wazeroir.OperationKindI32ReinterpretFromF32, + wazeroir.OperationKindI64ReinterpretFromF64, + wazeroir.OperationKindF32ReinterpretFromI32, + wazeroir.OperationKindF64ReinterpretFromI64: + // Reinterpret ops are essentially nop for engine mode + // because we treat all values as uint64, and Reinterpret* is only used at module + // validation phase where we check type soundness of all the operations. + // So just eliminate the ops. + continue } + ret.body = append(ret.body, op) } if len(onLabelAddressResolved) > 0 { - keys := make([]wazeroir.LabelID, 0, len(onLabelAddressResolved)) + keys := make([]wazeroir.Label, 0, len(onLabelAddressResolved)) for id := range onLabelAddressResolved { keys = append(keys, id) } @@ -790,7 +563,7 @@ func (ce *callEngine) callNativeFunc(ctx context.Context, m *wasm.ModuleInstance // TODO: add description of each operation/case // on, for example, how many args are used, // how the stack is modified, etc. 
- switch op.Kind() { + switch op.Kind { case wazeroir.OperationKindBuiltinFunctionCheckExitCode: if err := m.FailIfClosed(); err != nil { panic(err) @@ -810,11 +583,15 @@ func (ce *callEngine) callNativeFunc(ctx context.Context, m *wasm.ModuleInstance } case wazeroir.OperationKindBrTable: if v := uint64(ce.popValue()); v < uint64(len(op.Us)-1) { - ce.drop(op.Rs[v+1]) + if uint64(len(op.Rs)) > v+1 { + ce.drop(op.Rs[v+1]) + } frame.pc = op.Us[v+1] } else { // Default branch. - ce.drop(op.Rs[0]) + if len(op.Rs) > 0 { + ce.drop(op.Rs[0]) + } frame.pc = op.Us[0] } case wazeroir.OperationKindCall: diff --git a/internal/engine/interpreter/interpreter_test.go b/internal/engine/interpreter/interpreter_test.go index 62bc9167..3df894a4 100644 --- a/internal/engine/interpreter/interpreter_test.go +++ b/internal/engine/interpreter/interpreter_test.go @@ -328,26 +328,26 @@ func TestInterpreter_NonTrappingFloatToIntConversion(t *testing.T) { var body []*wazeroir.UnionOperation if in32bit { body = append(body, &wazeroir.UnionOperation{ - OpKind: wazeroir.OperationKindConstF32, - U1: uint64(math.Float32bits(tc.input32bit[i])), + Kind: wazeroir.OperationKindConstF32, + U1: uint64(math.Float32bits(tc.input32bit[i])), }) } else { body = append(body, &wazeroir.UnionOperation{ - OpKind: wazeroir.OperationKindConstF64, - U1: uint64(math.Float64bits(tc.input64bit[i])), + Kind: wazeroir.OperationKindConstF64, + U1: uint64(math.Float64bits(tc.input64bit[i])), }) } body = append(body, &wazeroir.UnionOperation{ - OpKind: wazeroir.OperationKindITruncFromF, - B1: byte(tc.inputType), - B2: byte(tc.outputType), - B3: true, // NonTrapping = true. + Kind: wazeroir.OperationKindITruncFromF, + B1: byte(tc.inputType), + B2: byte(tc.outputType), + B3: true, // NonTrapping = true. }) // Return from function. 
body = append(body, - &wazeroir.UnionOperation{OpKind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, + &wazeroir.UnionOperation{Kind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, ) ce := &callEngine{} @@ -417,9 +417,9 @@ func TestInterpreter_CallEngine_callNativeFunc_signExtend(t *testing.T) { f := &function{ moduleInstance: &wasm.ModuleInstance{Engine: &moduleEngine{}}, parent: &code{body: []*wazeroir.UnionOperation{ - {OpKind: wazeroir.OperationKindConstI32, U1: uint64(uint32(tc.in))}, - {OpKind: translateToIROperationKind(tc.opcode)}, - {OpKind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, + {Kind: wazeroir.OperationKindConstI32, U1: uint64(uint32(tc.in))}, + {Kind: translateToIROperationKind(tc.opcode)}, + {Kind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, }}, } ce.callNativeFunc(testCtx, &wasm.ModuleInstance{}, f) @@ -471,9 +471,9 @@ func TestInterpreter_CallEngine_callNativeFunc_signExtend(t *testing.T) { f := &function{ moduleInstance: &wasm.ModuleInstance{Engine: &moduleEngine{}}, parent: &code{body: []*wazeroir.UnionOperation{ - {OpKind: wazeroir.OperationKindConstI64, U1: uint64(tc.in)}, - {OpKind: translateToIROperationKind(tc.opcode)}, - {OpKind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, + {Kind: wazeroir.OperationKindConstI64, U1: uint64(tc.in)}, + {Kind: translateToIROperationKind(tc.opcode)}, + {Kind: wazeroir.OperationKindBr, U1: uint64(math.MaxUint64)}, }}, } ce.callNativeFunc(testCtx, &wasm.ModuleInstance{}, f) diff --git a/internal/wazeroir/compiler.go b/internal/wazeroir/compiler.go index 15ce1d4d..72fb7030 100644 --- a/internal/wazeroir/compiler.go +++ b/internal/wazeroir/compiler.go @@ -37,7 +37,7 @@ type ( func (c *controlFrame) ensureContinuation() { // Make sure that if the frame is block and doesn't have continuation, - // change the OpKind so we can emit the continuation block + // change the Kind so we can emit the continuation block // later when we reach the end instruction of this 
frame. if c.kind == controlFrameKindBlockWithoutContinuationLabel { c.kind = controlFrameKindBlockWithContinuationLabel @@ -48,14 +48,14 @@ func (c *controlFrame) asLabel() Label { switch c.kind { case controlFrameKindBlockWithContinuationLabel, controlFrameKindBlockWithoutContinuationLabel: - return Label{FrameID: c.frameID, Kind: LabelKindContinuation} + return NewLabel(LabelKindContinuation, c.frameID) case controlFrameKindLoop: - return Label{FrameID: c.frameID, Kind: LabelKindHeader} + return NewLabel(LabelKindHeader, c.frameID) case controlFrameKindFunction: - return Label{Kind: LabelKindReturn} + return NewLabel(LabelKindReturn, 0) case controlFrameKindIfWithElse, controlFrameKindIfWithoutElse: - return Label{FrameID: c.frameID, Kind: LabelKindContinuation} + return NewLabel(LabelKindContinuation, c.frameID) } panic(fmt.Sprintf("unreachable: a bug in wazeroir implementation: %v", c.kind)) } @@ -214,14 +214,14 @@ type CompilationResult struct { GoFunc interface{} // Operations holds wazeroir operations compiled from Wasm instructions in a Wasm function. - Operations []Operation + Operations []UnionOperation // IROperationSourceOffsetsInWasmBinary is index-correlated with Operation and maps each operation to the corresponding source instruction's // offset in the original WebAssembly binary. // Non nil only when the given Wasm module has the DWARF section. IROperationSourceOffsetsInWasmBinary []uint64 - // LabelCallers maps Label.String() to the number of callers to that label. + // LabelCallers maps label.String() to the number of callers to that label. // Here "callers" means that the call-sites which jumps to the label with br, br_if or br_table // instructions. 
// @@ -233,7 +233,7 @@ type CompilationResult struct { // ) // // This example the label corresponding to `(block i32.const 1111)` is never be reached at runtime because `br 0` exits the function before we reach there - LabelCallers map[LabelID]uint32 + LabelCallers map[Label]uint32 // Signature is the function type of the compilation target function. Signature *wasm.FunctionType @@ -349,7 +349,7 @@ func compile(enabledFeatures api.CoreFeatures, enabledFeatures: enabledFeatures, controlFrames: controlFramesStack, callFrameStackSizeInUint64: callFrameStackSizeInUint64, - result: CompilationResult{LabelCallers: map[LabelID]uint32{}}, + result: CompilationResult{LabelCallers: map[Label]uint32{}}, body: body, localTypes: localTypes, sig: sig, @@ -478,15 +478,13 @@ operatorSwitch: c.controlFrames.push(frame) // Prep labels for inside and the continuation of this loop. - loopLabel := Label{FrameID: frame.frameID, Kind: LabelKindHeader} - c.result.LabelCallers[loopLabel.ID()]++ + loopLabel := NewLabel(LabelKindHeader, frame.frameID) + c.result.LabelCallers[loopLabel]++ // Emit the branch operation to enter inside the loop. c.emit( - OperationBr{ - Target: loopLabel, - }, - OperationLabel{Label: loopLabel}, + NewOperationBr(loopLabel), + NewOperationLabel(loopLabel), ) // Insert the exit code check on the loop header, which is the only necessary point in the function body @@ -526,20 +524,18 @@ operatorSwitch: c.controlFrames.push(frame) // Prep labels for if and else of this if. - thenLabel := Label{Kind: LabelKindHeader, FrameID: frame.frameID} - elseLabel := Label{Kind: LabelKindElse, FrameID: frame.frameID} - c.result.LabelCallers[thenLabel.ID()]++ - c.result.LabelCallers[elseLabel.ID()]++ + thenLabel := NewLabel(LabelKindHeader, frame.frameID) + elseLabel := NewLabel(LabelKindElse, frame.frameID) + c.result.LabelCallers[thenLabel]++ + c.result.LabelCallers[elseLabel]++ // Emit the branch operation to enter the then block. 
c.emit( - OperationBrIf{ - Then: thenLabel.asBranchTargetDrop(), - Else: elseLabel.asBranchTargetDrop(), - }, - OperationLabel{ - Label: thenLabel, - }, + NewOperationBrIf( + thenLabel.asBranchTargetDrop(), + elseLabel.asBranchTargetDrop(), + ), + NewOperationLabel(thenLabel), ) case wasm.OpcodeElse: frame := c.controlFrames.top() @@ -561,22 +557,22 @@ operatorSwitch: // We are no longer unreachable in else frame, // so emit the correct label, and reset the unreachable state. - elseLabel := Label{FrameID: frame.frameID, Kind: LabelKindElse} + elseLabel := NewLabel(LabelKindElse, frame.frameID) c.resetUnreachable() c.emit( - OperationLabel{Label: elseLabel}, + NewOperationLabel(elseLabel), ) break operatorSwitch } - // Change the OpKind of this If block, indicating that + // Change the Kind of this If block, indicating that // the if has else block. frame.kind = controlFrameKindIfWithElse // We need to reset the stack so that // the values pushed inside the then block // do not affect the else block. - dropOp := OperationDrop{Depth: c.getFrameDropRange(frame, false)} + dropOp := NewOperationDrop(c.getFrameDropRange(frame, false)) // Reset the stack manipulated by the then block, and re-push the block param types to the stack. @@ -586,18 +582,18 @@ operatorSwitch: } // Prep labels for else and the continuation of this if block. - elseLabel := Label{FrameID: frame.frameID, Kind: LabelKindElse} - continuationLabel := Label{FrameID: frame.frameID, Kind: LabelKindContinuation} - c.result.LabelCallers[continuationLabel.ID()]++ + elseLabel := NewLabel(LabelKindElse, frame.frameID) + continuationLabel := NewLabel(LabelKindContinuation, frame.frameID) + c.result.LabelCallers[continuationLabel]++ // Emit the instructions for exiting the if loop, // and then the initiation of else block. c.emit( dropOp, // Jump to the continuation of this block. - OperationBr{Target: continuationLabel}, + NewOperationBr(continuationLabel), // Initiate the else block. 
- OperationLabel{Label: elseLabel}, + NewOperationLabel(elseLabel), ) case wasm.OpcodeEnd: if c.unreachableState.on && c.unreachableState.depth > 0 { @@ -616,19 +612,19 @@ operatorSwitch: c.stackPush(wasmValueTypeToUnsignedType(t)) } - continuationLabel := Label{FrameID: frame.frameID, Kind: LabelKindContinuation} + continuationLabel := NewLabel(LabelKindContinuation, frame.frameID) if frame.kind == controlFrameKindIfWithoutElse { // Emit the else label. - elseLabel := Label{Kind: LabelKindElse, FrameID: frame.frameID} - c.result.LabelCallers[continuationLabel.ID()]++ + elseLabel := NewLabel(LabelKindElse, frame.frameID) + c.result.LabelCallers[continuationLabel]++ c.emit( - OperationLabel{Label: elseLabel}, - OperationBr{Target: continuationLabel}, - OperationLabel{Label: continuationLabel}, + NewOperationLabel(elseLabel), + NewOperationBr(continuationLabel), + NewOperationLabel(continuationLabel), ) } else { c.emit( - OperationLabel{Label: continuationLabel}, + NewOperationLabel(continuationLabel), ) } @@ -639,7 +635,7 @@ operatorSwitch: // We need to reset the stack so that // the values pushed inside the block. - dropOp := OperationDrop{Depth: c.getFrameDropRange(frame, true)} + dropOp := NewOperationDrop(c.getFrameDropRange(frame, true)) c.stack = c.stack[:frame.originalStackLenWithoutParam] // Push the result types onto the stack. @@ -647,7 +643,7 @@ operatorSwitch: c.stackPush(wasmValueTypeToUnsignedType(t)) } - // Emit the instructions according to the OpKind of the current control frame. + // Emit the instructions according to the Kind of the current control frame. switch frame.kind { case controlFrameKindFunction: if !c.controlFrames.empty() { @@ -657,30 +653,30 @@ operatorSwitch: // Return from function. c.emit( dropOp, - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationBr(NewLabel(LabelKindReturn, 0)), ) case controlFrameKindIfWithoutElse: // This case we have to emit "empty" else label. 
- elseLabel := Label{Kind: LabelKindElse, FrameID: frame.frameID} - continuationLabel := Label{Kind: LabelKindContinuation, FrameID: frame.frameID} - c.result.LabelCallers[continuationLabel.ID()] += 2 + elseLabel := NewLabel(LabelKindElse, frame.frameID) + continuationLabel := NewLabel(LabelKindContinuation, frame.frameID) + c.result.LabelCallers[continuationLabel] += 2 c.emit( dropOp, - OperationBr{Target: continuationLabel}, + NewOperationBr(continuationLabel), // Emit the else which soon branches into the continuation. - OperationLabel{Label: elseLabel}, - OperationBr{Target: continuationLabel}, + NewOperationLabel(elseLabel), + NewOperationBr(continuationLabel), // Initiate the continuation. - OperationLabel{Label: continuationLabel}, + NewOperationLabel(continuationLabel), ) case controlFrameKindBlockWithContinuationLabel, controlFrameKindIfWithElse: - continuationLabel := Label{Kind: LabelKindContinuation, FrameID: frame.frameID} - c.result.LabelCallers[continuationLabel.ID()]++ + continuationLabel := NewLabel(LabelKindContinuation, frame.frameID) + c.result.LabelCallers[continuationLabel]++ c.emit( dropOp, - OperationBr{Target: continuationLabel}, - OperationLabel{Label: continuationLabel}, + NewOperationBr(continuationLabel), + NewOperationLabel(continuationLabel), ) case controlFrameKindLoop, controlFrameKindBlockWithoutContinuationLabel: c.emit( @@ -688,7 +684,7 @@ operatorSwitch: ) default: // Should never happen. If so, there's a bug in the translation. 
- panic(fmt.Errorf("bug: invalid control frame OpKind: 0x%x", frame.kind)) + panic(fmt.Errorf("bug: invalid control frame Kind: 0x%x", frame.kind)) } case wasm.OpcodeBr: @@ -705,12 +701,12 @@ operatorSwitch: targetFrame := c.controlFrames.get(int(targetIndex)) targetFrame.ensureContinuation() - dropOp := OperationDrop{Depth: c.getFrameDropRange(targetFrame, false)} - target := targetFrame.asLabel() - c.result.LabelCallers[target.ID()]++ + dropOp := NewOperationDrop(c.getFrameDropRange(targetFrame, false)) + targetID := targetFrame.asLabel() + c.result.LabelCallers[targetID]++ c.emit( dropOp, - OperationBr{Target: target}, + NewOperationBr(targetID), ) // Br operation is stack-polymorphic, and mark the state as unreachable. // That means subsequent instructions in the current control frame are "unreachable" @@ -731,20 +727,18 @@ operatorSwitch: targetFrame := c.controlFrames.get(int(targetIndex)) targetFrame.ensureContinuation() drop := c.getFrameDropRange(targetFrame, false) - target := targetFrame.asLabel() - c.result.LabelCallers[target.ID()]++ + targetID := targetFrame.asLabel() + c.result.LabelCallers[targetID]++ - continuationLabel := Label{FrameID: c.nextID(), Kind: LabelKindHeader} - c.result.LabelCallers[continuationLabel.ID()]++ + continuationLabel := NewLabel(LabelKindHeader, c.nextID()) + c.result.LabelCallers[continuationLabel]++ c.emit( - OperationBrIf{ - Then: BranchTargetDrop{ToDrop: drop, Target: target}, - Else: continuationLabel.asBranchTargetDrop(), - }, + NewOperationBrIf( + BranchTargetDrop{ToDrop: drop, Target: targetID}, + continuationLabel.asBranchTargetDrop(), + ), // Start emitting else block operations. - OperationLabel{ - Label: continuationLabel, - }, + NewOperationLabel(continuationLabel), ) case wasm.OpcodeBrTable: c.br.Reset(c.body[c.pc+1:]) @@ -770,8 +764,9 @@ operatorSwitch: } // Read the branch targets. 
- targets := make([]*BranchTargetDrop, numTargets) - for i := range targets { + targetLabels := make([]uint64, numTargets) + targetDrops := make([]*InclusiveRange, numTargets) + for i := uint32(0); i < numTargets; i++ { l, n, err := leb128.DecodeUint32(r) if err != nil { return fmt.Errorf("error reading target %d in br_table: %w", i, err) @@ -780,9 +775,10 @@ operatorSwitch: targetFrame := c.controlFrames.get(int(l)) targetFrame.ensureContinuation() drop := c.getFrameDropRange(targetFrame, false) - target := &BranchTargetDrop{ToDrop: drop, Target: targetFrame.asLabel()} - targets[i] = target - c.result.LabelCallers[target.Target.ID()]++ + targetLabel := targetFrame.asLabel() + targetLabels[i] = uint64(targetLabel) + targetDrops[i] = drop + c.result.LabelCallers[targetLabel]++ } // Prep default target control frame. @@ -794,16 +790,14 @@ operatorSwitch: defaultTargetFrame := c.controlFrames.get(int(l)) defaultTargetFrame.ensureContinuation() defaultTargetDrop := c.getFrameDropRange(defaultTargetFrame, false) - defaultTarget := defaultTargetFrame.asLabel() - c.result.LabelCallers[defaultTarget.ID()]++ + defaultTargetID := defaultTargetFrame.asLabel() + c.result.LabelCallers[defaultTargetID]++ c.emit( - OperationBrTable{ - Targets: targets, - Default: &BranchTargetDrop{ - ToDrop: defaultTargetDrop, Target: defaultTarget, - }, - }, + NewOperationBrTable( + append([]uint64{uint64(defaultTargetID)}, targetLabels...), + append([]*InclusiveRange{defaultTargetDrop}, targetDrops...), + ), ) // Br operation is stack-polymorphic, and mark the state as unreachable. 
// That means subsequent instructions in the current control frame are "unreachable" @@ -811,12 +805,12 @@ operatorSwitch: c.markUnreachable() case wasm.OpcodeReturn: functionFrame := c.controlFrames.functionFrame() - dropOp := OperationDrop{Depth: c.getFrameDropRange(functionFrame, false)} + dropOp := NewOperationDrop(c.getFrameDropRange(functionFrame, false)) // Cleanup the stack and then jmp to function frame's continuation (meaning return). c.emit( dropOp, - OperationBr{Target: functionFrame.asLabel()}, + NewOperationBr(functionFrame.asLabel()), ) // Return operation is stack-polymorphic, and mark the state as unreachable. @@ -845,7 +839,7 @@ operatorSwitch: r.End++ } c.emit( - OperationDrop{Depth: r}, + NewOperationDrop(r), ) case wasm.OpcodeSelect: // If it is on the unreachable state, ignore the instruction. @@ -1518,59 +1512,59 @@ operatorSwitch: ) case wasm.OpcodeI32TruncF32S: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedInt32}, + NewOperationITruncFromF(Float32, SignedInt32, false), ) case wasm.OpcodeI32TruncF32U: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedUint32}, + NewOperationITruncFromF(Float32, SignedUint32, false), ) case wasm.OpcodeI32TruncF64S: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedInt32}, + NewOperationITruncFromF(Float64, SignedInt32, false), ) case wasm.OpcodeI32TruncF64U: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedUint32}, + NewOperationITruncFromF(Float64, SignedUint32, false), ) case wasm.OpcodeI64ExtendI32S: c.emit( - OperationExtend{Signed: true}, + NewOperationExtend(true), ) case wasm.OpcodeI64ExtendI32U: c.emit( - OperationExtend{Signed: false}, + NewOperationExtend(false), ) case wasm.OpcodeI64TruncF32S: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedInt64}, + NewOperationITruncFromF(Float32, SignedInt64, false), ) case wasm.OpcodeI64TruncF32U: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: 
SignedUint64}, + NewOperationITruncFromF(Float32, SignedUint64, false), ) case wasm.OpcodeI64TruncF64S: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedInt64}, + NewOperationITruncFromF(Float64, SignedInt64, false), ) case wasm.OpcodeI64TruncF64U: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedUint64}, + NewOperationITruncFromF(Float64, SignedUint64, false), ) case wasm.OpcodeF32ConvertI32S: c.emit( - OperationFConvertFromI{InputType: SignedInt32, OutputType: Float32}, + NewOperationFConvertFromI(SignedInt32, Float32), ) case wasm.OpcodeF32ConvertI32U: c.emit( - OperationFConvertFromI{InputType: SignedUint32, OutputType: Float32}, + NewOperationFConvertFromI(SignedUint32, Float32), ) case wasm.OpcodeF32ConvertI64S: c.emit( - OperationFConvertFromI{InputType: SignedInt64, OutputType: Float32}, + NewOperationFConvertFromI(SignedInt64, Float32), ) case wasm.OpcodeF32ConvertI64U: c.emit( - OperationFConvertFromI{InputType: SignedUint64, OutputType: Float32}, + NewOperationFConvertFromI(SignedUint64, Float32), ) case wasm.OpcodeF32DemoteF64: c.emit( @@ -1578,19 +1572,19 @@ operatorSwitch: ) case wasm.OpcodeF64ConvertI32S: c.emit( - OperationFConvertFromI{InputType: SignedInt32, OutputType: Float64}, + NewOperationFConvertFromI(SignedInt32, Float64), ) case wasm.OpcodeF64ConvertI32U: c.emit( - OperationFConvertFromI{InputType: SignedUint32, OutputType: Float64}, + NewOperationFConvertFromI(SignedUint32, Float64), ) case wasm.OpcodeF64ConvertI64S: c.emit( - OperationFConvertFromI{InputType: SignedInt64, OutputType: Float64}, + NewOperationFConvertFromI(SignedInt64, Float64), ) case wasm.OpcodeF64ConvertI64U: c.emit( - OperationFConvertFromI{InputType: SignedUint64, OutputType: Float64}, + NewOperationFConvertFromI(SignedUint64, Float64), ) case wasm.OpcodeF64PromoteF32: c.emit( @@ -1640,7 +1634,7 @@ operatorSwitch: } c.pc += num - 1 c.emit( - OperationRefFunc{FunctionIndex: index}, + NewOperationRefFunc(index), ) case 
wasm.OpcodeRefNull: c.pc++ // Skip the type of reftype as every ref value is opaque pointer. @@ -1660,7 +1654,7 @@ operatorSwitch: } c.pc += num - 1 c.emit( - OperationTableGet{TableIndex: tableIndex}, + NewOperationTableGet(tableIndex), ) case wasm.OpcodeTableSet: c.pc++ @@ -1670,7 +1664,7 @@ operatorSwitch: } c.pc += num - 1 c.emit( - OperationTableSet{TableIndex: tableIndex}, + NewOperationTableSet(tableIndex), ) case wasm.OpcodeMiscPrefix: c.pc++ @@ -1683,35 +1677,35 @@ operatorSwitch: switch byte(miscOp) { case wasm.OpcodeMiscI32TruncSatF32S: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedInt32, NonTrapping: true}, + NewOperationITruncFromF(Float32, SignedInt32, true), ) case wasm.OpcodeMiscI32TruncSatF32U: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedUint32, NonTrapping: true}, + NewOperationITruncFromF(Float32, SignedUint32, true), ) case wasm.OpcodeMiscI32TruncSatF64S: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedInt32, NonTrapping: true}, + NewOperationITruncFromF(Float64, SignedInt32, true), ) case wasm.OpcodeMiscI32TruncSatF64U: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedUint32, NonTrapping: true}, + NewOperationITruncFromF(Float64, SignedUint32, true), ) case wasm.OpcodeMiscI64TruncSatF32S: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedInt64, NonTrapping: true}, + NewOperationITruncFromF(Float32, SignedInt64, true), ) case wasm.OpcodeMiscI64TruncSatF32U: c.emit( - OperationITruncFromF{InputType: Float32, OutputType: SignedUint64, NonTrapping: true}, + NewOperationITruncFromF(Float32, SignedUint64, true), ) case wasm.OpcodeMiscI64TruncSatF64S: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedInt64, NonTrapping: true}, + NewOperationITruncFromF(Float64, SignedInt64, true), ) case wasm.OpcodeMiscI64TruncSatF64U: c.emit( - OperationITruncFromF{InputType: Float64, OutputType: SignedUint64, NonTrapping: true}, + 
NewOperationITruncFromF(Float64, SignedUint64, true), ) case wasm.OpcodeMiscMemoryInit: c.result.UsesMemory = true @@ -1721,7 +1715,7 @@ operatorSwitch: } c.pc += num + 1 // +1 to skip the memory index which is fixed to zero. c.emit( - OperationMemoryInit{DataIndex: dataIndex}, + NewOperationMemoryInit(dataIndex), ) case wasm.OpcodeMiscDataDrop: dataIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:]) @@ -1730,7 +1724,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationDataDrop{DataIndex: dataIndex}, + NewOperationDataDrop(dataIndex), ) case wasm.OpcodeMiscMemoryCopy: c.result.UsesMemory = true @@ -1757,7 +1751,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationTableInit{ElemIndex: elemIndex, TableIndex: tableIndex}, + NewOperationTableInit(elemIndex, tableIndex), ) case wasm.OpcodeMiscElemDrop: elemIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:]) @@ -1766,7 +1760,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationElemDrop{ElemIndex: elemIndex}, + NewOperationElemDrop(elemIndex), ) case wasm.OpcodeMiscTableCopy: // Read the source table inde.g. @@ -1782,7 +1776,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationTableCopy{SrcTableIndex: src, DstTableIndex: dst}, + NewOperationTableCopy(src, dst), ) case wasm.OpcodeMiscTableGrow: // Read the source table inde.g. @@ -1792,7 +1786,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationTableGrow{TableIndex: tableIndex}, + NewOperationTableGrow(tableIndex), ) case wasm.OpcodeMiscTableSize: // Read the source table inde.g. @@ -1802,7 +1796,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationTableSize{TableIndex: tableIndex}, + NewOperationTableSize(tableIndex), ) case wasm.OpcodeMiscTableFill: // Read the source table index. 
@@ -1812,7 +1806,7 @@ operatorSwitch: } c.pc += num c.emit( - OperationTableFill{TableIndex: tableIndex}, + NewOperationTableFill(tableIndex), ) default: return fmt.Errorf("unsupported misc instruction in wazeroir: 0x%x", op) @@ -1826,7 +1820,7 @@ operatorSwitch: c.pc += 8 hi := binary.LittleEndian.Uint64(c.body[c.pc : c.pc+8]) c.emit( - OperationV128Const{Lo: lo, Hi: hi}, + NewOperationV128Const(lo, hi), ) c.pc += 7 case wasm.OpcodeVecV128Load: @@ -1835,7 +1829,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType128, Arg: arg}, + NewOperationV128Load(V128LoadType128, arg), ) case wasm.OpcodeVecV128Load8x8s: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8x8SName) @@ -1843,7 +1837,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType8x8s, Arg: arg}, + NewOperationV128Load(V128LoadType8x8s, arg), ) case wasm.OpcodeVecV128Load8x8u: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8x8UName) @@ -1851,7 +1845,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType8x8u, Arg: arg}, + NewOperationV128Load(V128LoadType8x8u, arg), ) case wasm.OpcodeVecV128Load16x4s: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16x4SName) @@ -1859,7 +1853,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType16x4s, Arg: arg}, + NewOperationV128Load(V128LoadType16x4s, arg), ) case wasm.OpcodeVecV128Load16x4u: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16x4UName) @@ -1867,7 +1861,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType16x4u, Arg: arg}, + NewOperationV128Load(V128LoadType16x4u, arg), ) case wasm.OpcodeVecV128Load32x2s: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32x2SName) @@ -1875,7 +1869,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType32x2s, Arg: arg}, + NewOperationV128Load(V128LoadType32x2s, arg), ) case wasm.OpcodeVecV128Load32x2u: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32x2UName) 
@@ -1883,7 +1877,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType32x2u, Arg: arg}, + NewOperationV128Load(V128LoadType32x2u, arg), ) case wasm.OpcodeVecV128Load8Splat: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8SplatName) @@ -1891,7 +1885,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType8Splat, Arg: arg}, + NewOperationV128Load(V128LoadType8Splat, arg), ) case wasm.OpcodeVecV128Load16Splat: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16SplatName) @@ -1899,7 +1893,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType16Splat, Arg: arg}, + NewOperationV128Load(V128LoadType16Splat, arg), ) case wasm.OpcodeVecV128Load32Splat: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32SplatName) @@ -1907,7 +1901,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType32Splat, Arg: arg}, + NewOperationV128Load(V128LoadType32Splat, arg), ) case wasm.OpcodeVecV128Load64Splat: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64SplatName) @@ -1915,7 +1909,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType64Splat, Arg: arg}, + NewOperationV128Load(V128LoadType64Splat, arg), ) case wasm.OpcodeVecV128Load32zero: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32zeroName) @@ -1923,7 +1917,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType32zero, Arg: arg}, + NewOperationV128Load(V128LoadType32zero, arg), ) case wasm.OpcodeVecV128Load64zero: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64zeroName) @@ -1931,7 +1925,7 @@ operatorSwitch: return err } c.emit( - OperationV128Load{Type: V128LoadType64zero, Arg: arg}, + NewOperationV128Load(V128LoadType64zero, arg), ) case wasm.OpcodeVecV128Load8Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8LaneName) @@ -1941,7 +1935,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128LoadLane{LaneIndex: laneIndex, LaneSize: 8, Arg: 
arg}, + NewOperationV128LoadLane(laneIndex, 8, arg), ) case wasm.OpcodeVecV128Load16Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16LaneName) @@ -1951,7 +1945,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128LoadLane{LaneIndex: laneIndex, LaneSize: 16, Arg: arg}, + NewOperationV128LoadLane(laneIndex, 16, arg), ) case wasm.OpcodeVecV128Load32Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32LaneName) @@ -1961,7 +1955,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128LoadLane{LaneIndex: laneIndex, LaneSize: 32, Arg: arg}, + NewOperationV128LoadLane(laneIndex, 32, arg), ) case wasm.OpcodeVecV128Load64Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64LaneName) @@ -1971,7 +1965,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128LoadLane{LaneIndex: laneIndex, LaneSize: 64, Arg: arg}, + NewOperationV128LoadLane(laneIndex, 64, arg), ) case wasm.OpcodeVecV128Store: arg, err := c.readMemoryArg(wasm.OpcodeVecV128StoreName) @@ -1979,7 +1973,7 @@ operatorSwitch: return err } c.emit( - OperationV128Store{Arg: arg}, + NewOperationV128Store(arg), ) case wasm.OpcodeVecV128Store8Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store8LaneName) @@ -1989,7 +1983,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128StoreLane{LaneIndex: laneIndex, LaneSize: 8, Arg: arg}, + NewOperationV128StoreLane(laneIndex, 8, arg), ) case wasm.OpcodeVecV128Store16Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store16LaneName) @@ -1999,7 +1993,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128StoreLane{LaneIndex: laneIndex, LaneSize: 16, Arg: arg}, + NewOperationV128StoreLane(laneIndex, 16, arg), ) case wasm.OpcodeVecV128Store32Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store32LaneName) @@ -2009,7 +2003,7 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128StoreLane{LaneIndex: laneIndex, LaneSize: 32, Arg: 
arg}, + NewOperationV128StoreLane(laneIndex, 32, arg), ) case wasm.OpcodeVecV128Store64Lane: arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store64LaneName) @@ -2019,889 +2013,892 @@ operatorSwitch: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128StoreLane{LaneIndex: laneIndex, LaneSize: 64, Arg: arg}, + NewOperationV128StoreLane(laneIndex, 64, arg), ) case wasm.OpcodeVecI8x16ExtractLaneS: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI8x16, Signed: true}, + NewOperationV128ExtractLane(laneIndex, true, ShapeI8x16), ) case wasm.OpcodeVecI8x16ExtractLaneU: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI8x16, Signed: false}, + NewOperationV128ExtractLane(laneIndex, false, ShapeI8x16), ) case wasm.OpcodeVecI16x8ExtractLaneS: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI16x8, Signed: true}, + NewOperationV128ExtractLane(laneIndex, true, ShapeI16x8), ) case wasm.OpcodeVecI16x8ExtractLaneU: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI16x8, Signed: false}, + NewOperationV128ExtractLane(laneIndex, false, ShapeI16x8), ) case wasm.OpcodeVecI32x4ExtractLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI32x4}, + NewOperationV128ExtractLane(laneIndex, false, ShapeI32x4), ) case wasm.OpcodeVecI64x2ExtractLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeI64x2}, + NewOperationV128ExtractLane(laneIndex, false, ShapeI64x2), ) case wasm.OpcodeVecF32x4ExtractLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeF32x4}, + NewOperationV128ExtractLane(laneIndex, false, ShapeF32x4), ) case wasm.OpcodeVecF64x2ExtractLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - 
OperationV128ExtractLane{LaneIndex: laneIndex, Shape: ShapeF64x2}, + NewOperationV128ExtractLane(laneIndex, false, ShapeF64x2), ) case wasm.OpcodeVecI8x16ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeI8x16}, + NewOperationV128ReplaceLane(laneIndex, ShapeI8x16), ) case wasm.OpcodeVecI16x8ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeI16x8}, + NewOperationV128ReplaceLane(laneIndex, ShapeI16x8), ) case wasm.OpcodeVecI32x4ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeI32x4}, + NewOperationV128ReplaceLane(laneIndex, ShapeI32x4), ) case wasm.OpcodeVecI64x2ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeI64x2}, + NewOperationV128ReplaceLane(laneIndex, ShapeI64x2), ) case wasm.OpcodeVecF32x4ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeF32x4}, + NewOperationV128ReplaceLane(laneIndex, ShapeF32x4), ) case wasm.OpcodeVecF64x2ReplaceLane: c.pc++ laneIndex := c.body[c.pc] c.emit( - OperationV128ReplaceLane{LaneIndex: laneIndex, Shape: ShapeF64x2}, + NewOperationV128ReplaceLane(laneIndex, ShapeF64x2), ) case wasm.OpcodeVecI8x16Splat: c.emit( - OperationV128Splat{Shape: ShapeI8x16}, + NewOperationV128Splat(ShapeI8x16), ) case wasm.OpcodeVecI16x8Splat: c.emit( - OperationV128Splat{Shape: ShapeI16x8}, + NewOperationV128Splat(ShapeI16x8), ) case wasm.OpcodeVecI32x4Splat: c.emit( - OperationV128Splat{Shape: ShapeI32x4}, + NewOperationV128Splat(ShapeI32x4), ) case wasm.OpcodeVecI64x2Splat: c.emit( - OperationV128Splat{Shape: ShapeI64x2}, + NewOperationV128Splat(ShapeI64x2), ) case wasm.OpcodeVecF32x4Splat: c.emit( - OperationV128Splat{Shape: ShapeF32x4}, + NewOperationV128Splat(ShapeF32x4), ) case wasm.OpcodeVecF64x2Splat: c.emit( - 
OperationV128Splat{Shape: ShapeF64x2}, + NewOperationV128Splat(ShapeF64x2), ) case wasm.OpcodeVecI8x16Swizzle: c.emit( - OperationV128Swizzle{}, + NewOperationV128Swizzle(), ) case wasm.OpcodeVecV128i8x16Shuffle: c.pc++ - op := OperationV128Shuffle{} - copy(op.Lanes[:], c.body[c.pc:c.pc+16]) + lanes := make([]uint64, 16) + for i := uint64(0); i < 16; i++ { + lanes[i] = uint64(c.body[c.pc+i]) + } + op := NewOperationV128Shuffle(lanes) c.emit(op) c.pc += 15 case wasm.OpcodeVecV128AnyTrue: c.emit( - OperationV128AnyTrue{}, + NewOperationV128AnyTrue(), ) case wasm.OpcodeVecI8x16AllTrue: c.emit( - OperationV128AllTrue{Shape: ShapeI8x16}, + NewOperationV128AllTrue(ShapeI8x16), ) case wasm.OpcodeVecI16x8AllTrue: c.emit( - OperationV128AllTrue{Shape: ShapeI16x8}, + NewOperationV128AllTrue(ShapeI16x8), ) case wasm.OpcodeVecI32x4AllTrue: c.emit( - OperationV128AllTrue{Shape: ShapeI32x4}, + NewOperationV128AllTrue(ShapeI32x4), ) case wasm.OpcodeVecI64x2AllTrue: c.emit( - OperationV128AllTrue{Shape: ShapeI64x2}, + NewOperationV128AllTrue(ShapeI64x2), ) case wasm.OpcodeVecI8x16BitMask: c.emit( - OperationV128BitMask{Shape: ShapeI8x16}, + NewOperationV128BitMask(ShapeI8x16), ) case wasm.OpcodeVecI16x8BitMask: c.emit( - OperationV128BitMask{Shape: ShapeI16x8}, + NewOperationV128BitMask(ShapeI16x8), ) case wasm.OpcodeVecI32x4BitMask: c.emit( - OperationV128BitMask{Shape: ShapeI32x4}, + NewOperationV128BitMask(ShapeI32x4), ) case wasm.OpcodeVecI64x2BitMask: c.emit( - OperationV128BitMask{Shape: ShapeI64x2}, + NewOperationV128BitMask(ShapeI64x2), ) case wasm.OpcodeVecV128And: c.emit( - OperationV128And{}, + NewOperationV128And(), ) case wasm.OpcodeVecV128Not: c.emit( - OperationV128Not{}, + NewOperationV128Not(), ) case wasm.OpcodeVecV128Or: c.emit( - OperationV128Or{}, + NewOperationV128Or(), ) case wasm.OpcodeVecV128Xor: c.emit( - OperationV128Xor{}, + NewOperationV128Xor(), ) case wasm.OpcodeVecV128Bitselect: c.emit( - OperationV128Bitselect{}, + NewOperationV128Bitselect(), ) 
case wasm.OpcodeVecV128AndNot: c.emit( - OperationV128AndNot{}, + NewOperationV128AndNot(), ) case wasm.OpcodeVecI8x16Shl: c.emit( - OperationV128Shl{Shape: ShapeI8x16}, + NewOperationV128Shl(ShapeI8x16), ) case wasm.OpcodeVecI8x16ShrS: c.emit( - OperationV128Shr{Shape: ShapeI8x16, Signed: true}, + NewOperationV128Shr(ShapeI8x16, true), ) case wasm.OpcodeVecI8x16ShrU: c.emit( - OperationV128Shr{Shape: ShapeI8x16, Signed: false}, + NewOperationV128Shr(ShapeI8x16, false), ) case wasm.OpcodeVecI16x8Shl: c.emit( - OperationV128Shl{Shape: ShapeI16x8}, + NewOperationV128Shl(ShapeI16x8), ) case wasm.OpcodeVecI16x8ShrS: c.emit( - OperationV128Shr{Shape: ShapeI16x8, Signed: true}, + NewOperationV128Shr(ShapeI16x8, true), ) case wasm.OpcodeVecI16x8ShrU: c.emit( - OperationV128Shr{Shape: ShapeI16x8, Signed: false}, + NewOperationV128Shr(ShapeI16x8, false), ) case wasm.OpcodeVecI32x4Shl: c.emit( - OperationV128Shl{Shape: ShapeI32x4}, + NewOperationV128Shl(ShapeI32x4), ) case wasm.OpcodeVecI32x4ShrS: c.emit( - OperationV128Shr{Shape: ShapeI32x4, Signed: true}, + NewOperationV128Shr(ShapeI32x4, true), ) case wasm.OpcodeVecI32x4ShrU: c.emit( - OperationV128Shr{Shape: ShapeI32x4, Signed: false}, + NewOperationV128Shr(ShapeI32x4, false), ) case wasm.OpcodeVecI64x2Shl: c.emit( - OperationV128Shl{Shape: ShapeI64x2}, + NewOperationV128Shl(ShapeI64x2), ) case wasm.OpcodeVecI64x2ShrS: c.emit( - OperationV128Shr{Shape: ShapeI64x2, Signed: true}, + NewOperationV128Shr(ShapeI64x2, true), ) case wasm.OpcodeVecI64x2ShrU: c.emit( - OperationV128Shr{Shape: ShapeI64x2, Signed: false}, + NewOperationV128Shr(ShapeI64x2, false), ) case wasm.OpcodeVecI8x16Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16Eq}, + NewOperationV128Cmp(V128CmpTypeI8x16Eq), ) case wasm.OpcodeVecI8x16Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16Ne}, + NewOperationV128Cmp(V128CmpTypeI8x16Ne), ) case wasm.OpcodeVecI8x16LtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16LtS}, + 
NewOperationV128Cmp(V128CmpTypeI8x16LtS), ) case wasm.OpcodeVecI8x16LtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16LtU}, + NewOperationV128Cmp(V128CmpTypeI8x16LtU), ) case wasm.OpcodeVecI8x16GtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16GtS}, + NewOperationV128Cmp(V128CmpTypeI8x16GtS), ) case wasm.OpcodeVecI8x16GtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16GtU}, + NewOperationV128Cmp(V128CmpTypeI8x16GtU), ) case wasm.OpcodeVecI8x16LeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16LeS}, + NewOperationV128Cmp(V128CmpTypeI8x16LeS), ) case wasm.OpcodeVecI8x16LeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16LeU}, + NewOperationV128Cmp(V128CmpTypeI8x16LeU), ) case wasm.OpcodeVecI8x16GeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16GeS}, + NewOperationV128Cmp(V128CmpTypeI8x16GeS), ) case wasm.OpcodeVecI8x16GeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI8x16GeU}, + NewOperationV128Cmp(V128CmpTypeI8x16GeU), ) case wasm.OpcodeVecI16x8Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8Eq}, + NewOperationV128Cmp(V128CmpTypeI16x8Eq), ) case wasm.OpcodeVecI16x8Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8Ne}, + NewOperationV128Cmp(V128CmpTypeI16x8Ne), ) case wasm.OpcodeVecI16x8LtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8LtS}, + NewOperationV128Cmp(V128CmpTypeI16x8LtS), ) case wasm.OpcodeVecI16x8LtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8LtU}, + NewOperationV128Cmp(V128CmpTypeI16x8LtU), ) case wasm.OpcodeVecI16x8GtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8GtS}, + NewOperationV128Cmp(V128CmpTypeI16x8GtS), ) case wasm.OpcodeVecI16x8GtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8GtU}, + NewOperationV128Cmp(V128CmpTypeI16x8GtU), ) case wasm.OpcodeVecI16x8LeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8LeS}, + NewOperationV128Cmp(V128CmpTypeI16x8LeS), ) case wasm.OpcodeVecI16x8LeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8LeU}, + NewOperationV128Cmp(V128CmpTypeI16x8LeU), ) case 
wasm.OpcodeVecI16x8GeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8GeS}, + NewOperationV128Cmp(V128CmpTypeI16x8GeS), ) case wasm.OpcodeVecI16x8GeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI16x8GeU}, + NewOperationV128Cmp(V128CmpTypeI16x8GeU), ) case wasm.OpcodeVecI32x4Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4Eq}, + NewOperationV128Cmp(V128CmpTypeI32x4Eq), ) case wasm.OpcodeVecI32x4Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4Ne}, + NewOperationV128Cmp(V128CmpTypeI32x4Ne), ) case wasm.OpcodeVecI32x4LtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4LtS}, + NewOperationV128Cmp(V128CmpTypeI32x4LtS), ) case wasm.OpcodeVecI32x4LtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4LtU}, + NewOperationV128Cmp(V128CmpTypeI32x4LtU), ) case wasm.OpcodeVecI32x4GtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4GtS}, + NewOperationV128Cmp(V128CmpTypeI32x4GtS), ) case wasm.OpcodeVecI32x4GtU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4GtU}, + NewOperationV128Cmp(V128CmpTypeI32x4GtU), ) case wasm.OpcodeVecI32x4LeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4LeS}, + NewOperationV128Cmp(V128CmpTypeI32x4LeS), ) case wasm.OpcodeVecI32x4LeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4LeU}, + NewOperationV128Cmp(V128CmpTypeI32x4LeU), ) case wasm.OpcodeVecI32x4GeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4GeS}, + NewOperationV128Cmp(V128CmpTypeI32x4GeS), ) case wasm.OpcodeVecI32x4GeU: c.emit( - OperationV128Cmp{Type: V128CmpTypeI32x4GeU}, + NewOperationV128Cmp(V128CmpTypeI32x4GeU), ) case wasm.OpcodeVecI64x2Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeI64x2Eq}, + NewOperationV128Cmp(V128CmpTypeI64x2Eq), ) case wasm.OpcodeVecI64x2Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeI64x2Ne}, + NewOperationV128Cmp(V128CmpTypeI64x2Ne), ) case wasm.OpcodeVecI64x2LtS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI64x2LtS}, + NewOperationV128Cmp(V128CmpTypeI64x2LtS), ) case wasm.OpcodeVecI64x2GtS: c.emit( - OperationV128Cmp{Type: 
V128CmpTypeI64x2GtS}, + NewOperationV128Cmp(V128CmpTypeI64x2GtS), ) case wasm.OpcodeVecI64x2LeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI64x2LeS}, + NewOperationV128Cmp(V128CmpTypeI64x2LeS), ) case wasm.OpcodeVecI64x2GeS: c.emit( - OperationV128Cmp{Type: V128CmpTypeI64x2GeS}, + NewOperationV128Cmp(V128CmpTypeI64x2GeS), ) case wasm.OpcodeVecF32x4Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Eq}, + NewOperationV128Cmp(V128CmpTypeF32x4Eq), ) case wasm.OpcodeVecF32x4Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Ne}, + NewOperationV128Cmp(V128CmpTypeF32x4Ne), ) case wasm.OpcodeVecF32x4Lt: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Lt}, + NewOperationV128Cmp(V128CmpTypeF32x4Lt), ) case wasm.OpcodeVecF32x4Gt: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Gt}, + NewOperationV128Cmp(V128CmpTypeF32x4Gt), ) case wasm.OpcodeVecF32x4Le: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Le}, + NewOperationV128Cmp(V128CmpTypeF32x4Le), ) case wasm.OpcodeVecF32x4Ge: c.emit( - OperationV128Cmp{Type: V128CmpTypeF32x4Ge}, + NewOperationV128Cmp(V128CmpTypeF32x4Ge), ) case wasm.OpcodeVecF64x2Eq: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Eq}, + NewOperationV128Cmp(V128CmpTypeF64x2Eq), ) case wasm.OpcodeVecF64x2Ne: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Ne}, + NewOperationV128Cmp(V128CmpTypeF64x2Ne), ) case wasm.OpcodeVecF64x2Lt: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Lt}, + NewOperationV128Cmp(V128CmpTypeF64x2Lt), ) case wasm.OpcodeVecF64x2Gt: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Gt}, + NewOperationV128Cmp(V128CmpTypeF64x2Gt), ) case wasm.OpcodeVecF64x2Le: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Le}, + NewOperationV128Cmp(V128CmpTypeF64x2Le), ) case wasm.OpcodeVecF64x2Ge: c.emit( - OperationV128Cmp{Type: V128CmpTypeF64x2Ge}, + NewOperationV128Cmp(V128CmpTypeF64x2Ge), ) case wasm.OpcodeVecI8x16Neg: c.emit( - OperationV128Neg{Shape: ShapeI8x16}, + NewOperationV128Neg(ShapeI8x16), ) case wasm.OpcodeVecI16x8Neg: 
c.emit( - OperationV128Neg{Shape: ShapeI16x8}, + NewOperationV128Neg(ShapeI16x8), ) case wasm.OpcodeVecI32x4Neg: c.emit( - OperationV128Neg{Shape: ShapeI32x4}, + NewOperationV128Neg(ShapeI32x4), ) case wasm.OpcodeVecI64x2Neg: c.emit( - OperationV128Neg{Shape: ShapeI64x2}, + NewOperationV128Neg(ShapeI64x2), ) case wasm.OpcodeVecF32x4Neg: c.emit( - OperationV128Neg{Shape: ShapeF32x4}, + NewOperationV128Neg(ShapeF32x4), ) case wasm.OpcodeVecF64x2Neg: c.emit( - OperationV128Neg{Shape: ShapeF64x2}, + NewOperationV128Neg(ShapeF64x2), ) case wasm.OpcodeVecI8x16Add: c.emit( - OperationV128Add{Shape: ShapeI8x16}, + NewOperationV128Add(ShapeI8x16), ) case wasm.OpcodeVecI16x8Add: c.emit( - OperationV128Add{Shape: ShapeI16x8}, + NewOperationV128Add(ShapeI16x8), ) case wasm.OpcodeVecI32x4Add: c.emit( - OperationV128Add{Shape: ShapeI32x4}, + NewOperationV128Add(ShapeI32x4), ) case wasm.OpcodeVecI64x2Add: c.emit( - OperationV128Add{Shape: ShapeI64x2}, + NewOperationV128Add(ShapeI64x2), ) case wasm.OpcodeVecF32x4Add: c.emit( - OperationV128Add{Shape: ShapeF32x4}, + NewOperationV128Add(ShapeF32x4), ) case wasm.OpcodeVecF64x2Add: c.emit( - OperationV128Add{Shape: ShapeF64x2}, + NewOperationV128Add(ShapeF64x2), ) case wasm.OpcodeVecI8x16Sub: c.emit( - OperationV128Sub{Shape: ShapeI8x16}, + NewOperationV128Sub(ShapeI8x16), ) case wasm.OpcodeVecI16x8Sub: c.emit( - OperationV128Sub{Shape: ShapeI16x8}, + NewOperationV128Sub(ShapeI16x8), ) case wasm.OpcodeVecI32x4Sub: c.emit( - OperationV128Sub{Shape: ShapeI32x4}, + NewOperationV128Sub(ShapeI32x4), ) case wasm.OpcodeVecI64x2Sub: c.emit( - OperationV128Sub{Shape: ShapeI64x2}, + NewOperationV128Sub(ShapeI64x2), ) case wasm.OpcodeVecF32x4Sub: c.emit( - OperationV128Sub{Shape: ShapeF32x4}, + NewOperationV128Sub(ShapeF32x4), ) case wasm.OpcodeVecF64x2Sub: c.emit( - OperationV128Sub{Shape: ShapeF64x2}, + NewOperationV128Sub(ShapeF64x2), ) case wasm.OpcodeVecI8x16AddSatS: c.emit( - OperationV128AddSat{Shape: ShapeI8x16, Signed: true}, + 
NewOperationV128AddSat(ShapeI8x16, true), ) case wasm.OpcodeVecI8x16AddSatU: c.emit( - OperationV128AddSat{Shape: ShapeI8x16, Signed: false}, + NewOperationV128AddSat(ShapeI8x16, false), ) case wasm.OpcodeVecI16x8AddSatS: c.emit( - OperationV128AddSat{Shape: ShapeI16x8, Signed: true}, + NewOperationV128AddSat(ShapeI16x8, true), ) case wasm.OpcodeVecI16x8AddSatU: c.emit( - OperationV128AddSat{Shape: ShapeI16x8, Signed: false}, + NewOperationV128AddSat(ShapeI16x8, false), ) case wasm.OpcodeVecI8x16SubSatS: c.emit( - OperationV128SubSat{Shape: ShapeI8x16, Signed: true}, + NewOperationV128SubSat(ShapeI8x16, true), ) case wasm.OpcodeVecI8x16SubSatU: c.emit( - OperationV128SubSat{Shape: ShapeI8x16, Signed: false}, + NewOperationV128SubSat(ShapeI8x16, false), ) case wasm.OpcodeVecI16x8SubSatS: c.emit( - OperationV128SubSat{Shape: ShapeI16x8, Signed: true}, + NewOperationV128SubSat(ShapeI16x8, true), ) case wasm.OpcodeVecI16x8SubSatU: c.emit( - OperationV128SubSat{Shape: ShapeI16x8, Signed: false}, + NewOperationV128SubSat(ShapeI16x8, false), ) case wasm.OpcodeVecI16x8Mul: c.emit( - OperationV128Mul{Shape: ShapeI16x8}, + NewOperationV128Mul(ShapeI16x8), ) case wasm.OpcodeVecI32x4Mul: c.emit( - OperationV128Mul{Shape: ShapeI32x4}, + NewOperationV128Mul(ShapeI32x4), ) case wasm.OpcodeVecI64x2Mul: c.emit( - OperationV128Mul{Shape: ShapeI64x2}, + NewOperationV128Mul(ShapeI64x2), ) case wasm.OpcodeVecF32x4Mul: c.emit( - OperationV128Mul{Shape: ShapeF32x4}, + NewOperationV128Mul(ShapeF32x4), ) case wasm.OpcodeVecF64x2Mul: c.emit( - OperationV128Mul{Shape: ShapeF64x2}, + NewOperationV128Mul(ShapeF64x2), ) case wasm.OpcodeVecF32x4Sqrt: c.emit( - OperationV128Sqrt{Shape: ShapeF32x4}, + NewOperationV128Sqrt(ShapeF32x4), ) case wasm.OpcodeVecF64x2Sqrt: c.emit( - OperationV128Sqrt{Shape: ShapeF64x2}, + NewOperationV128Sqrt(ShapeF64x2), ) case wasm.OpcodeVecF32x4Div: c.emit( - OperationV128Div{Shape: ShapeF32x4}, + NewOperationV128Div(ShapeF32x4), ) case wasm.OpcodeVecF64x2Div: c.emit( 
- OperationV128Div{Shape: ShapeF64x2}, + NewOperationV128Div(ShapeF64x2), ) case wasm.OpcodeVecI8x16Abs: c.emit( - OperationV128Abs{Shape: ShapeI8x16}, + NewOperationV128Abs(ShapeI8x16), ) case wasm.OpcodeVecI8x16Popcnt: c.emit( - OperationV128Popcnt{}, + NewOperationV128Popcnt(ShapeI8x16), ) case wasm.OpcodeVecI16x8Abs: c.emit( - OperationV128Abs{Shape: ShapeI16x8}, + NewOperationV128Abs(ShapeI16x8), ) case wasm.OpcodeVecI32x4Abs: c.emit( - OperationV128Abs{Shape: ShapeI32x4}, + NewOperationV128Abs(ShapeI32x4), ) case wasm.OpcodeVecI64x2Abs: c.emit( - OperationV128Abs{Shape: ShapeI64x2}, + NewOperationV128Abs(ShapeI64x2), ) case wasm.OpcodeVecF32x4Abs: c.emit( - OperationV128Abs{Shape: ShapeF32x4}, + NewOperationV128Abs(ShapeF32x4), ) case wasm.OpcodeVecF64x2Abs: c.emit( - OperationV128Abs{Shape: ShapeF64x2}, + NewOperationV128Abs(ShapeF64x2), ) case wasm.OpcodeVecI8x16MinS: c.emit( - OperationV128Min{Signed: true, Shape: ShapeI8x16}, + NewOperationV128Min(ShapeI8x16, true), ) case wasm.OpcodeVecI8x16MinU: c.emit( - OperationV128Min{Shape: ShapeI8x16}, + NewOperationV128Min(ShapeI8x16, false), ) case wasm.OpcodeVecI8x16MaxS: c.emit( - OperationV128Max{Shape: ShapeI8x16, Signed: true}, + NewOperationV128Max(ShapeI8x16, true), ) case wasm.OpcodeVecI8x16MaxU: c.emit( - OperationV128Max{Shape: ShapeI8x16}, + NewOperationV128Max(ShapeI8x16, false), ) case wasm.OpcodeVecI8x16AvgrU: c.emit( - OperationV128AvgrU{Shape: ShapeI8x16}, + NewOperationV128AvgrU(ShapeI8x16), ) case wasm.OpcodeVecI16x8MinS: c.emit( - OperationV128Min{Signed: true, Shape: ShapeI16x8}, + NewOperationV128Min(ShapeI16x8, true), ) case wasm.OpcodeVecI16x8MinU: c.emit( - OperationV128Min{Shape: ShapeI16x8}, + NewOperationV128Min(ShapeI16x8, false), ) case wasm.OpcodeVecI16x8MaxS: c.emit( - OperationV128Max{Shape: ShapeI16x8, Signed: true}, + NewOperationV128Max(ShapeI16x8, true), ) case wasm.OpcodeVecI16x8MaxU: c.emit( - OperationV128Max{Shape: ShapeI16x8}, + NewOperationV128Max(ShapeI16x8, false), ) 
case wasm.OpcodeVecI16x8AvgrU: c.emit( - OperationV128AvgrU{Shape: ShapeI16x8}, + NewOperationV128AvgrU(ShapeI16x8), ) case wasm.OpcodeVecI32x4MinS: c.emit( - OperationV128Min{Signed: true, Shape: ShapeI32x4}, + NewOperationV128Min(ShapeI32x4, true), ) case wasm.OpcodeVecI32x4MinU: c.emit( - OperationV128Min{Shape: ShapeI32x4}, + NewOperationV128Min(ShapeI32x4, false), ) case wasm.OpcodeVecI32x4MaxS: c.emit( - OperationV128Max{Shape: ShapeI32x4, Signed: true}, + NewOperationV128Max(ShapeI32x4, true), ) case wasm.OpcodeVecI32x4MaxU: c.emit( - OperationV128Max{Shape: ShapeI32x4}, + NewOperationV128Max(ShapeI32x4, false), ) case wasm.OpcodeVecF32x4Min: c.emit( - OperationV128Min{Shape: ShapeF32x4}, + NewOperationV128Min(ShapeF32x4, false), ) case wasm.OpcodeVecF32x4Max: c.emit( - OperationV128Max{Shape: ShapeF32x4}, + NewOperationV128Max(ShapeF32x4, false), ) case wasm.OpcodeVecF64x2Min: c.emit( - OperationV128Min{Shape: ShapeF64x2}, + NewOperationV128Min(ShapeF64x2, false), ) case wasm.OpcodeVecF64x2Max: c.emit( - OperationV128Max{Shape: ShapeF64x2}, + NewOperationV128Max(ShapeF64x2, false), ) case wasm.OpcodeVecF32x4Pmin: c.emit( - OperationV128Pmin{Shape: ShapeF32x4}, + NewOperationV128Pmin(ShapeF32x4), ) case wasm.OpcodeVecF32x4Pmax: c.emit( - OperationV128Pmax{Shape: ShapeF32x4}, + NewOperationV128Pmax(ShapeF32x4), ) case wasm.OpcodeVecF64x2Pmin: c.emit( - OperationV128Pmin{Shape: ShapeF64x2}, + NewOperationV128Pmin(ShapeF64x2), ) case wasm.OpcodeVecF64x2Pmax: c.emit( - OperationV128Pmax{Shape: ShapeF64x2}, + NewOperationV128Pmax(ShapeF64x2), ) case wasm.OpcodeVecF32x4Ceil: c.emit( - OperationV128Ceil{Shape: ShapeF32x4}, + NewOperationV128Ceil(ShapeF32x4), ) case wasm.OpcodeVecF32x4Floor: c.emit( - OperationV128Floor{Shape: ShapeF32x4}, + NewOperationV128Floor(ShapeF32x4), ) case wasm.OpcodeVecF32x4Trunc: c.emit( - OperationV128Trunc{Shape: ShapeF32x4}, + NewOperationV128Trunc(ShapeF32x4), ) case wasm.OpcodeVecF32x4Nearest: c.emit( - OperationV128Nearest{Shape: 
ShapeF32x4}, + NewOperationV128Nearest(ShapeF32x4), ) case wasm.OpcodeVecF64x2Ceil: c.emit( - OperationV128Ceil{Shape: ShapeF64x2}, + NewOperationV128Ceil(ShapeF64x2), ) case wasm.OpcodeVecF64x2Floor: c.emit( - OperationV128Floor{Shape: ShapeF64x2}, + NewOperationV128Floor(ShapeF64x2), ) case wasm.OpcodeVecF64x2Trunc: c.emit( - OperationV128Trunc{Shape: ShapeF64x2}, + NewOperationV128Trunc(ShapeF64x2), ) case wasm.OpcodeVecF64x2Nearest: c.emit( - OperationV128Nearest{Shape: ShapeF64x2}, + NewOperationV128Nearest(ShapeF64x2), ) case wasm.OpcodeVecI16x8ExtendLowI8x16S: c.emit( - OperationV128Extend{OriginShape: ShapeI8x16, Signed: true, UseLow: true}, + NewOperationV128Extend(ShapeI8x16, true, true), ) case wasm.OpcodeVecI16x8ExtendHighI8x16S: c.emit( - OperationV128Extend{OriginShape: ShapeI8x16, Signed: true, UseLow: false}, + NewOperationV128Extend(ShapeI8x16, true, false), ) case wasm.OpcodeVecI16x8ExtendLowI8x16U: c.emit( - OperationV128Extend{OriginShape: ShapeI8x16, Signed: false, UseLow: true}, + NewOperationV128Extend(ShapeI8x16, false, true), ) case wasm.OpcodeVecI16x8ExtendHighI8x16U: c.emit( - OperationV128Extend{OriginShape: ShapeI8x16, Signed: false, UseLow: false}, + NewOperationV128Extend(ShapeI8x16, false, false), ) case wasm.OpcodeVecI32x4ExtendLowI16x8S: c.emit( - OperationV128Extend{OriginShape: ShapeI16x8, Signed: true, UseLow: true}, + NewOperationV128Extend(ShapeI16x8, true, true), ) case wasm.OpcodeVecI32x4ExtendHighI16x8S: c.emit( - OperationV128Extend{OriginShape: ShapeI16x8, Signed: true, UseLow: false}, + NewOperationV128Extend(ShapeI16x8, true, false), ) case wasm.OpcodeVecI32x4ExtendLowI16x8U: c.emit( - OperationV128Extend{OriginShape: ShapeI16x8, Signed: false, UseLow: true}, + NewOperationV128Extend(ShapeI16x8, false, true), ) case wasm.OpcodeVecI32x4ExtendHighI16x8U: c.emit( - OperationV128Extend{OriginShape: ShapeI16x8, Signed: false, UseLow: false}, + NewOperationV128Extend(ShapeI16x8, false, false), ) case 
wasm.OpcodeVecI64x2ExtendLowI32x4S: c.emit( - OperationV128Extend{OriginShape: ShapeI32x4, Signed: true, UseLow: true}, + NewOperationV128Extend(ShapeI32x4, true, true), ) case wasm.OpcodeVecI64x2ExtendHighI32x4S: c.emit( - OperationV128Extend{OriginShape: ShapeI32x4, Signed: true, UseLow: false}, + NewOperationV128Extend(ShapeI32x4, true, false), ) case wasm.OpcodeVecI64x2ExtendLowI32x4U: c.emit( - OperationV128Extend{OriginShape: ShapeI32x4, Signed: false, UseLow: true}, + NewOperationV128Extend(ShapeI32x4, false, true), ) case wasm.OpcodeVecI64x2ExtendHighI32x4U: c.emit( - OperationV128Extend{OriginShape: ShapeI32x4, Signed: false, UseLow: false}, + NewOperationV128Extend(ShapeI32x4, false, false), ) case wasm.OpcodeVecI16x8Q15mulrSatS: c.emit( - OperationV128Q15mulrSatS{}, + NewOperationV128Q15mulrSatS(), ) case wasm.OpcodeVecI16x8ExtMulLowI8x16S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI8x16, Signed: true, UseLow: true}, + NewOperationV128ExtMul(ShapeI8x16, true, true), ) case wasm.OpcodeVecI16x8ExtMulHighI8x16S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI8x16, Signed: true, UseLow: false}, + NewOperationV128ExtMul(ShapeI8x16, true, false), ) case wasm.OpcodeVecI16x8ExtMulLowI8x16U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI8x16, Signed: false, UseLow: true}, + NewOperationV128ExtMul(ShapeI8x16, false, true), ) case wasm.OpcodeVecI16x8ExtMulHighI8x16U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI8x16, Signed: false, UseLow: false}, + NewOperationV128ExtMul(ShapeI8x16, false, false), ) case wasm.OpcodeVecI32x4ExtMulLowI16x8S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI16x8, Signed: true, UseLow: true}, + NewOperationV128ExtMul(ShapeI16x8, true, true), ) case wasm.OpcodeVecI32x4ExtMulHighI16x8S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI16x8, Signed: true, UseLow: false}, + NewOperationV128ExtMul(ShapeI16x8, true, false), ) case wasm.OpcodeVecI32x4ExtMulLowI16x8U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI16x8, 
Signed: false, UseLow: true}, + NewOperationV128ExtMul(ShapeI16x8, false, true), ) case wasm.OpcodeVecI32x4ExtMulHighI16x8U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI16x8, Signed: false, UseLow: false}, + NewOperationV128ExtMul(ShapeI16x8, false, false), ) case wasm.OpcodeVecI64x2ExtMulLowI32x4S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI32x4, Signed: true, UseLow: true}, + NewOperationV128ExtMul(ShapeI32x4, true, true), ) case wasm.OpcodeVecI64x2ExtMulHighI32x4S: c.emit( - OperationV128ExtMul{OriginShape: ShapeI32x4, Signed: true, UseLow: false}, + NewOperationV128ExtMul(ShapeI32x4, true, false), ) case wasm.OpcodeVecI64x2ExtMulLowI32x4U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI32x4, Signed: false, UseLow: true}, + NewOperationV128ExtMul(ShapeI32x4, false, true), ) case wasm.OpcodeVecI64x2ExtMulHighI32x4U: c.emit( - OperationV128ExtMul{OriginShape: ShapeI32x4, Signed: false, UseLow: false}, + NewOperationV128ExtMul(ShapeI32x4, false, false), ) case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S: c.emit( - OperationV128ExtAddPairwise{OriginShape: ShapeI8x16, Signed: true}, + NewOperationV128ExtAddPairwise(ShapeI8x16, true), ) case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U: c.emit( - OperationV128ExtAddPairwise{OriginShape: ShapeI8x16, Signed: false}, + NewOperationV128ExtAddPairwise(ShapeI8x16, false), ) case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S: c.emit( - OperationV128ExtAddPairwise{OriginShape: ShapeI16x8, Signed: true}, + NewOperationV128ExtAddPairwise(ShapeI16x8, true), ) case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U: c.emit( - OperationV128ExtAddPairwise{OriginShape: ShapeI16x8, Signed: false}, + NewOperationV128ExtAddPairwise(ShapeI16x8, false), ) case wasm.OpcodeVecF64x2PromoteLowF32x4Zero: c.emit( - OperationV128FloatPromote{}, + NewOperationV128FloatPromote(), ) case wasm.OpcodeVecF32x4DemoteF64x2Zero: c.emit( - OperationV128FloatDemote{}, + NewOperationV128FloatDemote(), ) case wasm.OpcodeVecF32x4ConvertI32x4S: c.emit( - 
OperationV128FConvertFromI{DestinationShape: ShapeF32x4, Signed: true}, + NewOperationV128FConvertFromI(ShapeF32x4, true), ) case wasm.OpcodeVecF32x4ConvertI32x4U: c.emit( - OperationV128FConvertFromI{DestinationShape: ShapeF32x4, Signed: false}, + NewOperationV128FConvertFromI(ShapeF32x4, false), ) case wasm.OpcodeVecF64x2ConvertLowI32x4S: c.emit( - OperationV128FConvertFromI{DestinationShape: ShapeF64x2, Signed: true}, + NewOperationV128FConvertFromI(ShapeF64x2, true), ) case wasm.OpcodeVecF64x2ConvertLowI32x4U: c.emit( - OperationV128FConvertFromI{DestinationShape: ShapeF64x2, Signed: false}, + NewOperationV128FConvertFromI(ShapeF64x2, false), ) case wasm.OpcodeVecI32x4DotI16x8S: c.emit( - OperationV128Dot{}, + NewOperationV128Dot(), ) case wasm.OpcodeVecI8x16NarrowI16x8S: c.emit( - OperationV128Narrow{OriginShape: ShapeI16x8, Signed: true}, + NewOperationV128Narrow(ShapeI16x8, true), ) case wasm.OpcodeVecI8x16NarrowI16x8U: c.emit( - OperationV128Narrow{OriginShape: ShapeI16x8, Signed: false}, + NewOperationV128Narrow(ShapeI16x8, false), ) case wasm.OpcodeVecI16x8NarrowI32x4S: c.emit( - OperationV128Narrow{OriginShape: ShapeI32x4, Signed: true}, + NewOperationV128Narrow(ShapeI32x4, true), ) case wasm.OpcodeVecI16x8NarrowI32x4U: c.emit( - OperationV128Narrow{OriginShape: ShapeI32x4, Signed: false}, + NewOperationV128Narrow(ShapeI32x4, false), ) case wasm.OpcodeVecI32x4TruncSatF32x4S: c.emit( - OperationV128ITruncSatFromF{OriginShape: ShapeF32x4, Signed: true}, + NewOperationV128ITruncSatFromF(ShapeF32x4, true), ) case wasm.OpcodeVecI32x4TruncSatF32x4U: c.emit( - OperationV128ITruncSatFromF{OriginShape: ShapeF32x4, Signed: false}, + NewOperationV128ITruncSatFromF(ShapeF32x4, false), ) case wasm.OpcodeVecI32x4TruncSatF64x2SZero: c.emit( - OperationV128ITruncSatFromF{OriginShape: ShapeF64x2, Signed: true}, + NewOperationV128ITruncSatFromF(ShapeF64x2, true), ) case wasm.OpcodeVecI32x4TruncSatF64x2UZero: c.emit( - OperationV128ITruncSatFromF{OriginShape: ShapeF64x2, 
Signed: false}, + NewOperationV128ITruncSatFromF(ShapeF64x2, false), ) default: return fmt.Errorf("unsupported vector instruction in wazeroir: %s", wasm.VectorInstructionName(vecOp)) @@ -3012,16 +3009,16 @@ func (c *compiler) stackPush(ts UnsignedType) { } // emit adds the operations into the result. -func (c *compiler) emit(ops ...Operation) { +func (c *compiler) emit(ops ...UnionOperation) { if !c.unreachableState.on { for _, op := range ops { - switch o := op.(type) { - case OperationDrop: + switch op.Kind { + case OperationKindDrop: // If the drop range is nil, // we could remove such operations. // That happens when drop operation is unnecessary. // i.e. when there's no need to adjust stack before jmp. - if o.Depth == nil { + if op.Rs[0] == nil { continue } } @@ -3051,7 +3048,7 @@ func (c *compiler) emitDefaultValue(t wasm.ValueType) { c.emit(NewOperationConstF64(0)) case wasm.ValueTypeV128: c.stackPush(UnsignedTypeV128) - c.emit(OperationV128Const{Hi: 0, Lo: 0}) + c.emit(NewOperationV128Const(0, 0)) } } diff --git a/internal/wazeroir/compiler_test.go b/internal/wazeroir/compiler_test.go index ffd46cd3..59298bef 100644 --- a/internal/wazeroir/compiler_test.go +++ b/internal/wazeroir/compiler_test.go @@ -48,10 +48,10 @@ func TestCompile(t *testing.T) { CodeSection: []wasm.Code{{Body: []byte{wasm.OpcodeEnd}}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Functions: []uint32{0}, Types: []wasm.FunctionType{v_v}, Signature: &v_v, @@ -66,10 +66,10 @@ func TestCompile(t *testing.T) { CodeSection: []wasm.Code{{Body: []byte{wasm.OpcodeEnd}}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Functions: []uint32{0}, Types: []wasm.FunctionType{v_v}, Signature: &v_v, @@ -102,12 +102,12 @@ func TestCompile(t *testing.T) { CodeSection: []wasm.Code{{Body: []byte{wasm.OpcodeLocalGet, 0, wasm.OpcodeEnd}}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$x] - NewOperationPick(0, false), // [$x, $x] - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // [$x] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [$x] + NewOperationPick(0, false), // [$x, $x] + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // [$x] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Types: []wasm.FunctionType{ { Params: []wasm.ValueType{i32}, Results: []wasm.ValueType{i32}, @@ -137,13 +137,13 @@ func TestCompile(t *testing.T) { }}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] + Operations: []UnionOperation{ // begin with params: [] NewOperationConstI32(8), // [8] NewOperationLoad(UnsignedTypeI32, MemoryArg{Alignment: 2, Offset: 0}), // [x] - OperationDrop{Depth: &InclusiveRange{}}, // [] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{}), // [] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Types: []wasm.FunctionType{v_v}, Functions: []uint32{0}, Signature: &v_v, @@ -164,13 +164,13 @@ func TestCompile(t *testing.T) { }}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] + Operations: []UnionOperation{ // begin with params: [] NewOperationConstI32(8), // [8] NewOperationLoad(UnsignedTypeI32, MemoryArg{Alignment: 2, Offset: 0}), // [x] - OperationDrop{Depth: &InclusiveRange{}}, // [] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{}), // [] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Types: []wasm.FunctionType{v_v}, Functions: []uint32{0}, Signature: &v_v, @@ -188,13 +188,13 @@ func TestCompile(t *testing.T) { }}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$delta] - NewOperationPick(0, false), // [$delta, $delta] - NewOperationMemoryGrow(), // [$delta, $old_size] - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // [$old_size] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [$delta] + NewOperationPick(0, false), // [$delta, $delta] + NewOperationMemoryGrow(), // [$delta, $old_size] + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // [$old_size] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Types: []wasm.FunctionType{{ Params: []wasm.ValueType{i32}, Results: []wasm.ValueType{i32}, ParamNumInUint64: 1, @@ -262,19 +262,15 @@ func TestCompile_Block(t *testing.T) { // Above set manually until the text compiler supports this: // (func (export "type-i32-i32") (block (drop (i32.add (br 0))))) expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] - OperationBr{ - Target: Label{FrameID: 2, Kind: LabelKindContinuation}, // arbitrary FrameID - }, - OperationLabel{ - Label: Label{FrameID: 2, Kind: LabelKindContinuation}, // arbitrary FrameID - }, - OperationBr{Target: Label{Kind: LabelKindReturn}}, + Operations: []UnionOperation{ // begin with params: [] + NewOperationBr(NewLabel(LabelKindContinuation, 2)), // arbitrary FrameID + NewOperationLabel(NewLabel(LabelKindContinuation, 2)), // arbitrary FrameID + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, // Note: i32.add comes after br 0 so is unreachable. Compilation succeeds when it feels like it // shouldn't because the br instruction is stack-polymorphic. In other words, (br 0) substitutes for the // two i32 parameters to add. - LabelCallers: map[LabelID]uint32{Label{Kind: LabelKindContinuation, FrameID: 2}.ID(): 1}, + LabelCallers: map[Label]uint32{NewLabel(LabelKindContinuation, 2): 1}, Functions: []uint32{0}, Types: []wasm.FunctionType{v_v}, Signature: &v_v, @@ -343,18 +339,18 @@ func TestCompile_BulkMemoryOperations(t *testing.T) { } expected := &CompilationResult{ - Operations: []Operation{ // begin with params: [] - NewOperationConstI32(16), // [16] - NewOperationConstI32(0), // [16, 0] - NewOperationConstI32(7), // [16, 0, 7] - OperationMemoryInit{1}, // [] - OperationDataDrop{1}, // [] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! 
+ Operations: []UnionOperation{ // begin with params: [] + NewOperationConstI32(16), // [16] + NewOperationConstI32(0), // [16, 0] + NewOperationConstI32(7), // [16, 0, 7] + NewOperationMemoryInit(1), // [] + NewOperationDataDrop(1), // [] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, HasMemory: true, UsesMemory: true, HasDataInstances: true, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &v_v, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{v_v}, @@ -396,13 +392,13 @@ func TestCompile_MultiValue(t *testing.T) { }}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$x, $y] - NewOperationPick(0, false), // [$x, $y, $y] - NewOperationPick(2, false), // [$x, $y, $y, $x] - OperationDrop{Depth: &InclusiveRange{Start: 2, End: 3}}, // [$y, $x] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [$x, $y] + NewOperationPick(0, false), // [$x, $y, $y] + NewOperationPick(2, false), // [$x, $y, $y, $x] + NewOperationDrop(&InclusiveRange{Start: 2, End: 3}), // [$y, $x] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &i32i32_i32i32, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{i32i32_i32i32}, @@ -432,20 +428,16 @@ func TestCompile_MultiValue(t *testing.T) { // ) // ) expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] - NewOperationConstF64(4), // [4] - NewOperationConstF64(5), // [4, 5] - OperationBr{ - Target: Label{FrameID: 2, Kind: LabelKindContinuation}, // arbitrary FrameID - }, - OperationLabel{ - Label: Label{FrameID: 2, Kind: LabelKindContinuation}, // arbitrary FrameID - }, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! 
+ Operations: []UnionOperation{ // begin with params: [] + NewOperationConstF64(4), // [4] + NewOperationConstF64(5), // [4, 5] + NewOperationBr(NewLabel(LabelKindContinuation, 2)), // arbitrary FrameID + NewOperationLabel(NewLabel(LabelKindContinuation, 2)), // arbitrary FrameID + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, // Note: f64.add comes after br 0 so is unreachable. This is why neither the add, nor its other operand // are in the above compilation result. - LabelCallers: map[LabelID]uint32{Label{Kind: LabelKindContinuation, FrameID: 2}.ID(): 1}, // arbitrary label + LabelCallers: map[Label]uint32{NewLabel(LabelKindContinuation, 2): 1}, // arbitrary label Signature: &v_f64f64, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{v_f64f64}, @@ -463,12 +455,12 @@ func TestCompile_MultiValue(t *testing.T) { }}}, }, expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [] - NewOperationConstI32(306), // [306] - NewOperationConstI64(356), // [306, 356] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + Operations: []UnionOperation{ // begin with params: [] + NewOperationConstI32(306), // [306] + NewOperationConstI64(356), // [306, 356] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &_i32i64, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{_i32i64}, @@ -498,29 +490,29 @@ func TestCompile_MultiValue(t *testing.T) { // ) // ) expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$0] + Operations: []UnionOperation{ // begin with params: [$0] NewOperationConstI32(1), // [$0, 1] NewOperationPick(1, false), // [$0, 1, $0] - OperationBrIf{ // [$0, 1] - Then: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindHeader}}, - Else: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindElse}}, - }, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindHeader}}, + NewOperationBrIf( // [$0, 1] + BranchTargetDrop{Target: NewLabel(LabelKindHeader, 2)}, + BranchTargetDrop{Target: NewLabel(LabelKindElse, 2)}, + ), + NewOperationLabel(NewLabel(LabelKindHeader, 2)), NewOperationConstI32(2), // [$0, 1, 2] NewOperationAdd(UnsignedTypeI32), // [$0, 3] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindElse}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindElse, 2)), NewOperationConstI32(uint32(api.EncodeI32(-2))), // [$0, 1, -2] NewOperationAdd(UnsignedTypeI32), // [$0, -1] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // .L2 = [3], .L2_else = [-1] - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindContinuation, 2)), + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // .L2 = [3], .L2_else = [-1] + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, - LabelCallers: map[LabelID]uint32{ - Label{Kind: LabelKindHeader, FrameID: 2}.ID(): 1, - Label{Kind: 
LabelKindContinuation, FrameID: 2}.ID(): 2, - Label{Kind: LabelKindElse, FrameID: 2}.ID(): 1, + LabelCallers: map[Label]uint32{ + NewLabel(LabelKindHeader, 2): 1, + NewLabel(LabelKindContinuation, 2): 2, + NewLabel(LabelKindElse, 2): 1, }, Signature: &i32_i32, Functions: []wasm.Index{0}, @@ -556,28 +548,28 @@ func TestCompile_MultiValue(t *testing.T) { // ) // ) expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$0] + Operations: []UnionOperation{ // begin with params: [$0] NewOperationConstI32(1), // [$0, 1] NewOperationConstI32(2), // [$0, 1, 2] NewOperationPick(2, false), // [$0, 1, 2, $0] - OperationBrIf{ // [$0, 1, 2] - Then: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindHeader}}, - Else: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindElse}}, - }, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindHeader}}, + NewOperationBrIf( // [$0, 1, 2] + BranchTargetDrop{Target: NewLabel(LabelKindHeader, 2)}, + BranchTargetDrop{Target: NewLabel(LabelKindElse, 2)}, + ), + NewOperationLabel(NewLabel(LabelKindHeader, 2)), NewOperationAdd(UnsignedTypeI32), // [$0, 3] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindElse}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindElse, 2)), NewOperationSub(UnsignedTypeI32), // [$0, -1] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // .L2 = [3], .L2_else = [-1] - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindContinuation, 2)), + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // .L2 = [3], .L2_else = [-1] + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, - LabelCallers: map[LabelID]uint32{ - Label{Kind: 
LabelKindHeader, FrameID: 2}.ID(): 1, - Label{Kind: LabelKindContinuation, FrameID: 2}.ID(): 2, - Label{Kind: LabelKindElse, FrameID: 2}.ID(): 1, + LabelCallers: map[Label]uint32{ + NewLabel(LabelKindHeader, 2): 1, + NewLabel(LabelKindContinuation, 2): 2, + NewLabel(LabelKindElse, 2): 1, }, Signature: &i32_i32, Functions: []wasm.Index{0}, @@ -613,28 +605,28 @@ func TestCompile_MultiValue(t *testing.T) { // ) // ) expected: &CompilationResult{ - Operations: []Operation{ // begin with params: [$0] + Operations: []UnionOperation{ // begin with params: [$0] NewOperationConstI32(1), // [$0, 1] NewOperationConstI32(2), // [$0, 1, 2] NewOperationPick(2, false), // [$0, 1, 2, $0] - OperationBrIf{ // [$0, 1, 2] - Then: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindHeader}}, - Else: BranchTargetDrop{Target: Label{FrameID: 2, Kind: LabelKindElse}}, - }, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindHeader}}, + NewOperationBrIf( // [$0, 1, 2] + BranchTargetDrop{Target: NewLabel(LabelKindHeader, 2)}, + BranchTargetDrop{Target: NewLabel(LabelKindElse, 2)}, + ), + NewOperationLabel(NewLabel(LabelKindHeader, 2)), NewOperationAdd(UnsignedTypeI32), // [$0, 3] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindElse}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindElse, 2)), NewOperationSub(UnsignedTypeI32), // [$0, -1] - OperationBr{Target: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationLabel{Label: Label{FrameID: 2, Kind: LabelKindContinuation}}, - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // .L2 = [3], .L2_else = [-1] - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationBr(NewLabel(LabelKindContinuation, 2)), + NewOperationLabel(NewLabel(LabelKindContinuation, 2)), + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // .L2 = [3], .L2_else = [-1] + NewOperationBr(NewLabel(LabelKindReturn, 0)), 
}, - LabelCallers: map[LabelID]uint32{ - Label{Kind: LabelKindHeader, FrameID: 2}.ID(): 1, - Label{Kind: LabelKindContinuation, FrameID: 2}.ID(): 2, - Label{Kind: LabelKindElse, FrameID: 2}.ID(): 1, + LabelCallers: map[Label]uint32{ + NewLabel(LabelKindHeader, 2): 1, + NewLabel(LabelKindContinuation, 2): 2, + NewLabel(LabelKindElse, 2): 1, }, Signature: &i32_i32, Functions: []wasm.Index{0}, @@ -674,17 +666,17 @@ func TestCompile_NonTrappingFloatToIntConversion(t *testing.T) { } expected := &CompilationResult{ - Operations: []Operation{ // begin with params: [$0] + Operations: []UnionOperation{ // begin with params: [$0] NewOperationPick(0, false), // [$0, $0] - OperationITruncFromF{ // [$0, i32.trunc_sat_f32_s($0)] - InputType: Float32, - OutputType: SignedInt32, - NonTrapping: true, - }, - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // [i32.trunc_sat_f32_s($0)] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationITruncFromF( // [$0, i32.trunc_sat_f32_s($0)] + Float32, + SignedInt32, + true, + ), + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // [i32.trunc_sat_f32_s($0)] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &f32_i32, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{f32_i32}, @@ -709,13 +701,13 @@ func TestCompile_SignExtensionOps(t *testing.T) { } expected := &CompilationResult{ - Operations: []Operation{ // begin with params: [$0] - NewOperationPick(0, false), // [$0, $0] - NewOperationSignExtend32From8(), // [$0, i32.extend8_s($0)] - OperationDrop{Depth: &InclusiveRange{Start: 1, End: 1}}, // [i32.extend8_s($0)] - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! 
+ Operations: []UnionOperation{ // begin with params: [$0] + NewOperationPick(0, false), // [$0, $0] + NewOperationSignExtend32From8(), // [$0, i32.extend8_s($0)] + NewOperationDrop(&InclusiveRange{Start: 1, End: 1}), // [i32.extend8_s($0)] + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &i32_i32, Functions: []wasm.Index{0}, Types: []wasm.FunctionType{i32_i32}, @@ -760,13 +752,13 @@ func TestCompile_CallIndirectNonZeroTableIndex(t *testing.T) { } expected := &CompilationResult{ - Operations: []Operation{ // begin with params: [] + Operations: []UnionOperation{ // begin with params: [] NewOperationConstI32(0), NewOperationCallIndirect(2, 5), - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, HasTable: true, - LabelCallers: map[LabelID]uint32{}, + LabelCallers: map[Label]uint32{}, Signature: &v_v, Functions: []wasm.Index{0}, TableTypes: []wasm.RefType{ @@ -784,7 +776,7 @@ func TestCompile_Refs(t *testing.T) { tests := []struct { name string body []byte - expected []Operation + expected []UnionOperation }{ { name: "ref.func", @@ -793,10 +785,10 @@ func TestCompile_Refs(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ - OperationRefFunc{FunctionIndex: 100}, - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + expected: []UnionOperation{ + NewOperationRefFunc(100), + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -806,10 +798,10 @@ func TestCompile_Refs(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI64(0), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! 
+ NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -819,10 +811,10 @@ func TestCompile_Refs(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI64(0), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -833,11 +825,11 @@ func TestCompile_Refs(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ - OperationRefFunc{FunctionIndex: 100}, + expected: []UnionOperation{ + NewOperationRefFunc(100), NewOperationEqz(UnsignedInt64), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -848,11 +840,11 @@ func TestCompile_Refs(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI64(0), NewOperationEqz(UnsignedInt64), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, }, } @@ -876,7 +868,7 @@ func TestCompile_TableGetOrSet(t *testing.T) { tests := []struct { name string body []byte - expected []Operation + expected []UnionOperation }{ { name: "table.get", @@ -886,11 +878,11 @@ func TestCompile_TableGetOrSet(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI32(10), - OperationTableGet{TableIndex: 0}, - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationTableGet(0), + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -901,11 +893,11 @@ func TestCompile_TableGetOrSet(t *testing.T) { wasm.OpcodeTableSet, 0, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI32(10), NewOperationConstI64(0), - OperationTableSet{TableIndex: 0}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationTableSet(0), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -916,11 +908,11 @@ func TestCompile_TableGetOrSet(t *testing.T) { wasm.OpcodeTableSet, 0, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI32(10), - OperationRefFunc{FunctionIndex: 1}, - OperationTableSet{TableIndex: 0}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationRefFunc(1), + NewOperationTableSet(0), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, } @@ -945,7 +937,7 @@ func TestCompile_TableGrowFillSize(t *testing.T) { tests := []struct { name string body []byte - expected []Operation + expected []UnionOperation }{ { name: "table.grow", @@ -955,12 +947,12 @@ func TestCompile_TableGrowFillSize(t *testing.T) { wasm.OpcodeMiscPrefix, wasm.OpcodeMiscTableGrow, 1, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI64(0), // Null ref. 
NewOperationConstI32(1), - OperationTableGrow{TableIndex: 1}, - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationTableGrow(1), + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -972,12 +964,12 @@ func TestCompile_TableGrowFillSize(t *testing.T) { wasm.OpcodeMiscPrefix, wasm.OpcodeMiscTableFill, 1, wasm.OpcodeEnd, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI32(10), NewOperationConstI64(0), // Null ref. NewOperationConstI32(1), - OperationTableFill{TableIndex: 1}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationTableFill(1), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -986,10 +978,10 @@ func TestCompile_TableGrowFillSize(t *testing.T) { wasm.OpcodeMiscPrefix, wasm.OpcodeMiscTableSize, 1, wasm.OpcodeEnd, }, - expected: []Operation{ - OperationTableSize{TableIndex: 1}, - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + expected: []UnionOperation{ + NewOperationTableSize(1), + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, } @@ -1015,7 +1007,7 @@ func TestCompile_Locals(t *testing.T) { tests := []struct { name string mod *wasm.Module - expected []Operation + expected []UnionOperation }{ { name: "local.get - func param - v128", @@ -1027,10 +1019,10 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationPick(1, true), // [param[0].low, param[0].high] -> [param[0].low, param[0].high, param[0].low, param[0].high] - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 3}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! 
+ NewOperationDrop(&InclusiveRange{Start: 0, End: 3}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1043,10 +1035,10 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationPick(0, false), // [param[0]] -> [param[0], param[0]] - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1062,11 +1054,11 @@ func TestCompile_Locals(t *testing.T) { LocalTypes: []wasm.ValueType{wasm.ValueTypeV128}, }}, }, - expected: []Operation{ - OperationV128Const{Lo: 0, Hi: 0}, + expected: []UnionOperation{ + NewOperationV128Const(0, 0), NewOperationPick(1, true), // [p[0].low, p[0].high] -> [p[0].low, p[0].high, p[0].low, p[0].high] - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 3}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 3}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1082,13 +1074,13 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ // [p[0].lo, p[1].hi] -> [p[0].lo, p[1].hi, 0x01, 0x02] - OperationV128Const{Lo: 0x01, Hi: 0x02}, + NewOperationV128Const(0x01, 0x02), // [p[0].lo, p[1].hi, 0x01, 0x02] -> [0x01, 0x02] NewOperationSet(3, true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, }, { @@ -1102,11 +1094,11 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstI32(0x1), NewOperationSet(1, false), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 0}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 0}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1125,14 +1117,14 @@ func TestCompile_Locals(t *testing.T) { LocalTypes: []wasm.ValueType{wasm.ValueTypeV128}, }}, }, - expected: []Operation{ - OperationV128Const{Lo: 0, Hi: 0}, + expected: []UnionOperation{ + NewOperationV128Const(0, 0), // [p[0].lo, p[1].hi] -> [p[0].lo, p[1].hi, 0x01, 0x02] - OperationV128Const{Lo: 0x01, Hi: 0x02}, + NewOperationV128Const(0x01, 0x02), // [p[0].lo, p[1].hi, 0x01, 0x02] -> [0x01, 0x02] NewOperationSet(3, true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1148,15 +1140,15 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ // [p[0].lo, p[1].hi] -> [p[0].lo, p[1].hi, 0x01, 0x02] - OperationV128Const{Lo: 0x01, Hi: 0x02}, + NewOperationV128Const(0x01, 0x02), // [p[0].lo, p[1].hi, 0x01, 0x02] -> [p[0].lo, p[1].hi, 0x01, 0x02, 0x01, 0x02] NewOperationPick(1, true), // [p[0].lo, p[1].hi, 0x01, 0x02, 0x01, 0x02] -> [0x01, 0x02, 0x01, 0x02] NewOperationSet(5, true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 3}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 3}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, }, { @@ -1170,12 +1162,12 @@ func TestCompile_Locals(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ + expected: []UnionOperation{ NewOperationConstF32(math.Float32frombits(1)), NewOperationPick(0, false), NewOperationSet(2, false), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! }, }, { @@ -1194,16 +1186,16 @@ func TestCompile_Locals(t *testing.T) { LocalTypes: []wasm.ValueType{wasm.ValueTypeV128}, }}, }, - expected: []Operation{ - OperationV128Const{Lo: 0, Hi: 0}, + expected: []UnionOperation{ + NewOperationV128Const(0, 0), // [p[0].lo, p[1].hi] -> [p[0].lo, p[1].hi, 0x01, 0x02] - OperationV128Const{Lo: 0x01, Hi: 0x02}, + NewOperationV128Const(0x01, 0x02), // [p[0].lo, p[1].hi, 0x01, 0x02] -> [p[0].lo, p[1].hi, 0x01, 0x02, 0x01, 0x02] NewOperationPick(1, true), // [p[0].lo, p[1].hi, 0x01, 0x02, 0x01, 0x2] -> [0x01, 0x02, 0x01, 0x02] NewOperationSet(5, true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 3}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, // return! + NewOperationDrop(&InclusiveRange{Start: 0, End: 3}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), // return! 
}, }, } @@ -1320,176 +1312,176 @@ func TestCompile_Vec(t *testing.T) { tests := []struct { name string body []byte - expected Operation + expected UnionOperation needDropBeforeReturn bool }{ { name: "i8x16 add", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI8x16Add), - expected: OperationV128Add{Shape: ShapeI8x16}, + expected: NewOperationV128Add(ShapeI8x16), }, { name: "i8x16 add", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI16x8Add), - expected: OperationV128Add{Shape: ShapeI16x8}, + expected: NewOperationV128Add(ShapeI16x8), }, { name: "i32x2 add", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI64x2Add), - expected: OperationV128Add{Shape: ShapeI64x2}, + expected: NewOperationV128Add(ShapeI64x2), }, { name: "i64x2 add", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI64x2Add), - expected: OperationV128Add{Shape: ShapeI64x2}, + expected: NewOperationV128Add(ShapeI64x2), }, { name: "i8x16 sub", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI8x16Sub), - expected: OperationV128Sub{Shape: ShapeI8x16}, + expected: NewOperationV128Sub(ShapeI8x16), }, { name: "i16x8 sub", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI16x8Sub), - expected: OperationV128Sub{Shape: ShapeI16x8}, + expected: NewOperationV128Sub(ShapeI16x8), }, { name: "i32x2 sub", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI64x2Sub), - expected: OperationV128Sub{Shape: ShapeI64x2}, + expected: NewOperationV128Sub(ShapeI64x2), }, { name: "i64x2 sub", needDropBeforeReturn: true, body: vv2v(wasm.OpcodeVecI64x2Sub), - expected: OperationV128Sub{Shape: ShapeI64x2}, + expected: NewOperationV128Sub(ShapeI64x2), }, { name: wasm.OpcodeVecV128LoadName, body: load(wasm.OpcodeVecV128Load, 0, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType128, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType128, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128LoadName + "/align=4", body: 
load(wasm.OpcodeVecV128Load, 0, 4), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType128, Arg: MemoryArg{Alignment: 4, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType128, MemoryArg{Alignment: 4, Offset: 0}), }, { name: wasm.OpcodeVecV128Load8x8SName, body: load(wasm.OpcodeVecV128Load8x8s, 1, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType8x8s, Arg: MemoryArg{Alignment: 0, Offset: 1}}, + expected: NewOperationV128Load(V128LoadType8x8s, MemoryArg{Alignment: 0, Offset: 1}), }, { name: wasm.OpcodeVecV128Load8x8SName + "/align=1", body: load(wasm.OpcodeVecV128Load8x8s, 0, 1), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType8x8s, Arg: MemoryArg{Alignment: 1, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType8x8s, MemoryArg{Alignment: 1, Offset: 0}), }, { name: wasm.OpcodeVecV128Load8x8UName, body: load(wasm.OpcodeVecV128Load8x8u, 0, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType8x8u, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType8x8u, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Load8x8UName + "/align=1", body: load(wasm.OpcodeVecV128Load8x8u, 0, 1), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType8x8u, Arg: MemoryArg{Alignment: 1, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType8x8u, MemoryArg{Alignment: 1, Offset: 0}), }, { name: wasm.OpcodeVecV128Load16x4SName, body: load(wasm.OpcodeVecV128Load16x4s, 1, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType16x4s, Arg: MemoryArg{Alignment: 0, Offset: 1}}, + expected: NewOperationV128Load(V128LoadType16x4s, MemoryArg{Alignment: 0, Offset: 1}), }, { name: wasm.OpcodeVecV128Load16x4SName + "/align=2", body: load(wasm.OpcodeVecV128Load16x4s, 0, 2), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType16x4s, Arg: MemoryArg{Alignment: 2, 
Offset: 0}}, + expected: NewOperationV128Load(V128LoadType16x4s, MemoryArg{Alignment: 2, Offset: 0}), }, { name: wasm.OpcodeVecV128Load16x4UName, body: load(wasm.OpcodeVecV128Load16x4u, 0, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType16x4u, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType16x4u, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Load16x4UName + "/align=2", body: load(wasm.OpcodeVecV128Load16x4u, 0, 2), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType16x4u, Arg: MemoryArg{Alignment: 2, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType16x4u, MemoryArg{Alignment: 2, Offset: 0}), }, { name: wasm.OpcodeVecV128Load32x2SName, body: load(wasm.OpcodeVecV128Load32x2s, 1, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32x2s, Arg: MemoryArg{Alignment: 0, Offset: 1}}, + expected: NewOperationV128Load(V128LoadType32x2s, MemoryArg{Alignment: 0, Offset: 1}), }, { name: wasm.OpcodeVecV128Load32x2SName + "/align=3", body: load(wasm.OpcodeVecV128Load32x2s, 0, 3), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32x2s, Arg: MemoryArg{Alignment: 3, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType32x2s, MemoryArg{Alignment: 3, Offset: 0}), }, { name: wasm.OpcodeVecV128Load32x2UName, body: load(wasm.OpcodeVecV128Load32x2u, 0, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32x2u, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType32x2u, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Load32x2UName + "/align=3", body: load(wasm.OpcodeVecV128Load32x2u, 0, 3), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32x2u, Arg: MemoryArg{Alignment: 3, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType32x2u, MemoryArg{Alignment: 3, Offset: 0}), }, { name: 
wasm.OpcodeVecV128Load8SplatName, body: load(wasm.OpcodeVecV128Load8Splat, 2, 0), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType8Splat, Arg: MemoryArg{Alignment: 0, Offset: 2}}, + expected: NewOperationV128Load(V128LoadType8Splat, MemoryArg{Alignment: 0, Offset: 2}), }, { name: wasm.OpcodeVecV128Load16SplatName, body: load(wasm.OpcodeVecV128Load16Splat, 0, 1), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType16Splat, Arg: MemoryArg{Alignment: 1, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType16Splat, MemoryArg{Alignment: 1, Offset: 0}), }, { name: wasm.OpcodeVecV128Load32SplatName, body: load(wasm.OpcodeVecV128Load32Splat, 3, 2), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32Splat, Arg: MemoryArg{Alignment: 2, Offset: 3}}, + expected: NewOperationV128Load(V128LoadType32Splat, MemoryArg{Alignment: 2, Offset: 3}), }, { name: wasm.OpcodeVecV128Load64SplatName, body: load(wasm.OpcodeVecV128Load64Splat, 0, 3), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType64Splat, Arg: MemoryArg{Alignment: 3, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType64Splat, MemoryArg{Alignment: 3, Offset: 0}), }, { name: wasm.OpcodeVecV128Load32zeroName, body: load(wasm.OpcodeVecV128Load32zero, 0, 2), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType32zero, Arg: MemoryArg{Alignment: 2, Offset: 0}}, + expected: NewOperationV128Load(V128LoadType32zero, MemoryArg{Alignment: 2, Offset: 0}), }, { name: wasm.OpcodeVecV128Load64zeroName, body: load(wasm.OpcodeVecV128Load64zero, 5, 3), needDropBeforeReturn: true, - expected: OperationV128Load{Type: V128LoadType64zero, Arg: MemoryArg{Alignment: 3, Offset: 5}}, + expected: NewOperationV128Load(V128LoadType64zero, MemoryArg{Alignment: 3, Offset: 5}), }, { name: wasm.OpcodeVecV128Load8LaneName, needDropBeforeReturn: true, body: loadLane(wasm.OpcodeVecV128Load8Lane, 5, 0, 10), - expected: 
OperationV128LoadLane{LaneIndex: 10, LaneSize: 8, Arg: MemoryArg{Alignment: 0, Offset: 5}}, + expected: NewOperationV128LoadLane(10, 8, MemoryArg{Alignment: 0, Offset: 5}), }, { name: wasm.OpcodeVecV128Load16LaneName, needDropBeforeReturn: true, body: loadLane(wasm.OpcodeVecV128Load16Lane, 100, 1, 7), - expected: OperationV128LoadLane{LaneIndex: 7, LaneSize: 16, Arg: MemoryArg{Alignment: 1, Offset: 100}}, + expected: NewOperationV128LoadLane(7, 16, MemoryArg{Alignment: 1, Offset: 100}), }, { name: wasm.OpcodeVecV128Load32LaneName, needDropBeforeReturn: true, body: loadLane(wasm.OpcodeVecV128Load32Lane, 0, 2, 3), - expected: OperationV128LoadLane{LaneIndex: 3, LaneSize: 32, Arg: MemoryArg{Alignment: 2, Offset: 0}}, + expected: NewOperationV128LoadLane(3, 32, MemoryArg{Alignment: 2, Offset: 0}), }, { name: wasm.OpcodeVecV128Load64LaneName, needDropBeforeReturn: true, body: loadLane(wasm.OpcodeVecV128Load64Lane, 0, 3, 1), - expected: OperationV128LoadLane{LaneIndex: 1, LaneSize: 64, Arg: MemoryArg{Alignment: 3, Offset: 0}}, + expected: NewOperationV128LoadLane(1, 64, MemoryArg{Alignment: 3, Offset: 0}), }, { name: wasm.OpcodeVecV128StoreName, body: []byte{ @@ -1505,250 +1497,250 @@ func TestCompile_Vec(t *testing.T) { 10, // offset wasm.OpcodeEnd, }, - expected: OperationV128Store{Arg: MemoryArg{Alignment: 4, Offset: 10}}, + expected: NewOperationV128Store(MemoryArg{Alignment: 4, Offset: 10}), }, { name: wasm.OpcodeVecV128Store8LaneName, body: storeLane(wasm.OpcodeVecV128Store8Lane, 0, 0, 0), - expected: OperationV128StoreLane{LaneIndex: 0, LaneSize: 8, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128StoreLane(0, 8, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Store8LaneName + "/lane=15", body: storeLane(wasm.OpcodeVecV128Store8Lane, 100, 0, 15), - expected: OperationV128StoreLane{LaneIndex: 15, LaneSize: 8, Arg: MemoryArg{Alignment: 0, Offset: 100}}, + expected: NewOperationV128StoreLane(15, 8, MemoryArg{Alignment: 0, 
Offset: 100}), }, { name: wasm.OpcodeVecV128Store16LaneName, body: storeLane(wasm.OpcodeVecV128Store16Lane, 0, 0, 0), - expected: OperationV128StoreLane{LaneIndex: 0, LaneSize: 16, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128StoreLane(0, 16, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Store16LaneName + "/lane=7/align=1", body: storeLane(wasm.OpcodeVecV128Store16Lane, 100, 1, 7), - expected: OperationV128StoreLane{LaneIndex: 7, LaneSize: 16, Arg: MemoryArg{Alignment: 1, Offset: 100}}, + expected: NewOperationV128StoreLane(7, 16, MemoryArg{Alignment: 1, Offset: 100}), }, { name: wasm.OpcodeVecV128Store32LaneName, body: storeLane(wasm.OpcodeVecV128Store32Lane, 0, 0, 0), - expected: OperationV128StoreLane{LaneIndex: 0, LaneSize: 32, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128StoreLane(0, 32, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Store32LaneName + "/lane=3/align=2", body: storeLane(wasm.OpcodeVecV128Store32Lane, 100, 2, 3), - expected: OperationV128StoreLane{LaneIndex: 3, LaneSize: 32, Arg: MemoryArg{Alignment: 2, Offset: 100}}, + expected: NewOperationV128StoreLane(3, 32, MemoryArg{Alignment: 2, Offset: 100}), }, { name: wasm.OpcodeVecV128Store64LaneName, body: storeLane(wasm.OpcodeVecV128Store64Lane, 0, 0, 0), - expected: OperationV128StoreLane{LaneIndex: 0, LaneSize: 64, Arg: MemoryArg{Alignment: 0, Offset: 0}}, + expected: NewOperationV128StoreLane(0, 64, MemoryArg{Alignment: 0, Offset: 0}), }, { name: wasm.OpcodeVecV128Store64LaneName + "/lane=1/align=3", body: storeLane(wasm.OpcodeVecV128Store64Lane, 50, 3, 1), - expected: OperationV128StoreLane{LaneIndex: 1, LaneSize: 64, Arg: MemoryArg{Alignment: 3, Offset: 50}}, + expected: NewOperationV128StoreLane(1, 64, MemoryArg{Alignment: 3, Offset: 50}), }, { name: wasm.OpcodeVecI8x16ExtractLaneSName, body: extractLane(wasm.OpcodeVecI8x16ExtractLaneS, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Signed: true, 
Shape: ShapeI8x16}, + expected: NewOperationV128ExtractLane(0, true, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16ExtractLaneSName + "/lane=15", body: extractLane(wasm.OpcodeVecI8x16ExtractLaneS, 15), - expected: OperationV128ExtractLane{LaneIndex: 15, Signed: true, Shape: ShapeI8x16}, + expected: NewOperationV128ExtractLane(15, true, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16ExtractLaneUName, body: extractLane(wasm.OpcodeVecI8x16ExtractLaneU, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Signed: false, Shape: ShapeI8x16}, + expected: NewOperationV128ExtractLane(0, false, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16ExtractLaneUName + "/lane=15", body: extractLane(wasm.OpcodeVecI8x16ExtractLaneU, 15), - expected: OperationV128ExtractLane{LaneIndex: 15, Signed: false, Shape: ShapeI8x16}, + expected: NewOperationV128ExtractLane(15, false, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ExtractLaneSName, body: extractLane(wasm.OpcodeVecI16x8ExtractLaneS, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Signed: true, Shape: ShapeI16x8}, + expected: NewOperationV128ExtractLane(0, true, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ExtractLaneSName + "/lane=7", body: extractLane(wasm.OpcodeVecI16x8ExtractLaneS, 7), - expected: OperationV128ExtractLane{LaneIndex: 7, Signed: true, Shape: ShapeI16x8}, + expected: NewOperationV128ExtractLane(7, true, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ExtractLaneUName, body: extractLane(wasm.OpcodeVecI16x8ExtractLaneU, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Signed: false, Shape: ShapeI16x8}, + expected: NewOperationV128ExtractLane(0, false, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ExtractLaneUName + "/lane=7", body: extractLane(wasm.OpcodeVecI16x8ExtractLaneU, 7), - expected: OperationV128ExtractLane{LaneIndex: 7, 
Signed: false, Shape: ShapeI16x8}, + expected: NewOperationV128ExtractLane(7, false, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI32x4ExtractLaneName, body: extractLane(wasm.OpcodeVecI32x4ExtractLane, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Shape: ShapeI32x4}, + expected: NewOperationV128ExtractLane(0, false, ShapeI32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI32x4ExtractLaneName + "/lane=3", body: extractLane(wasm.OpcodeVecI32x4ExtractLane, 3), - expected: OperationV128ExtractLane{LaneIndex: 3, Shape: ShapeI32x4}, + expected: NewOperationV128ExtractLane(3, false, ShapeI32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI64x2ExtractLaneName, body: extractLane(wasm.OpcodeVecI64x2ExtractLane, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Shape: ShapeI64x2}, + expected: NewOperationV128ExtractLane(0, false, ShapeI64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI64x2ExtractLaneName + "/lane=1", body: extractLane(wasm.OpcodeVecI64x2ExtractLane, 1), - expected: OperationV128ExtractLane{LaneIndex: 1, Shape: ShapeI64x2}, + expected: NewOperationV128ExtractLane(1, false, ShapeI64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF32x4ExtractLaneName, body: extractLane(wasm.OpcodeVecF32x4ExtractLane, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Shape: ShapeF32x4}, + expected: NewOperationV128ExtractLane(0, false, ShapeF32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF32x4ExtractLaneName + "/lane=3", body: extractLane(wasm.OpcodeVecF32x4ExtractLane, 3), - expected: OperationV128ExtractLane{LaneIndex: 3, Shape: ShapeF32x4}, + expected: NewOperationV128ExtractLane(3, false, ShapeF32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF64x2ExtractLaneName, body: extractLane(wasm.OpcodeVecF64x2ExtractLane, 0), - expected: OperationV128ExtractLane{LaneIndex: 0, Shape: ShapeF64x2}, + expected: NewOperationV128ExtractLane(0, false, ShapeF64x2), needDropBeforeReturn: 
true, }, { name: wasm.OpcodeVecF64x2ExtractLaneName + "/lane=1", body: extractLane(wasm.OpcodeVecF64x2ExtractLane, 1), - expected: OperationV128ExtractLane{LaneIndex: 1, Shape: ShapeF64x2}, + expected: NewOperationV128ExtractLane(1, false, ShapeF64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16ReplaceLaneName, body: replaceLane(wasm.OpcodeVecI8x16ReplaceLane, 0), - expected: OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeI8x16}, + expected: NewOperationV128ReplaceLane(0, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16ReplaceLaneName + "/lane=15", body: replaceLane(wasm.OpcodeVecI8x16ReplaceLane, 15), - expected: OperationV128ReplaceLane{LaneIndex: 15, Shape: ShapeI8x16}, + expected: NewOperationV128ReplaceLane(15, ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ReplaceLaneName, body: replaceLane(wasm.OpcodeVecI16x8ReplaceLane, 0), - expected: OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeI16x8}, + expected: NewOperationV128ReplaceLane(0, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8ReplaceLaneName + "/lane=7", body: replaceLane(wasm.OpcodeVecI16x8ReplaceLane, 7), - expected: OperationV128ReplaceLane{LaneIndex: 7, Shape: ShapeI16x8}, + expected: NewOperationV128ReplaceLane(7, ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI32x4ReplaceLaneName, body: replaceLane(wasm.OpcodeVecI32x4ReplaceLane, 0), - expected: OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeI32x4}, + expected: NewOperationV128ReplaceLane(0, ShapeI32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI32x4ReplaceLaneName + "/lane=3", body: replaceLane(wasm.OpcodeVecI32x4ReplaceLane, 3), - expected: OperationV128ReplaceLane{LaneIndex: 3, Shape: ShapeI32x4}, + expected: NewOperationV128ReplaceLane(3, ShapeI32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI64x2ReplaceLaneName, body: replaceLane(wasm.OpcodeVecI64x2ReplaceLane, 0), - expected: 
OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeI64x2}, + expected: NewOperationV128ReplaceLane(0, ShapeI64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI64x2ReplaceLaneName + "/lane=1", body: replaceLane(wasm.OpcodeVecI64x2ReplaceLane, 1), - expected: OperationV128ReplaceLane{LaneIndex: 1, Shape: ShapeI64x2}, + expected: NewOperationV128ReplaceLane(1, ShapeI64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF32x4ReplaceLaneName, body: replaceLane(wasm.OpcodeVecF32x4ReplaceLane, 0), - expected: OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeF32x4}, + expected: NewOperationV128ReplaceLane(0, ShapeF32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF32x4ReplaceLaneName + "/lane=3", body: replaceLane(wasm.OpcodeVecF32x4ReplaceLane, 3), - expected: OperationV128ReplaceLane{LaneIndex: 3, Shape: ShapeF32x4}, + expected: NewOperationV128ReplaceLane(3, ShapeF32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF64x2ReplaceLaneName, body: replaceLane(wasm.OpcodeVecF64x2ReplaceLane, 0), - expected: OperationV128ReplaceLane{LaneIndex: 0, Shape: ShapeF64x2}, + expected: NewOperationV128ReplaceLane(0, ShapeF64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF64x2ReplaceLaneName + "/lane=1", body: replaceLane(wasm.OpcodeVecF64x2ReplaceLane, 1), - expected: OperationV128ReplaceLane{LaneIndex: 1, Shape: ShapeF64x2}, + expected: NewOperationV128ReplaceLane(1, ShapeF64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16SplatName, body: splat(wasm.OpcodeVecI8x16Splat), - expected: OperationV128Splat{Shape: ShapeI8x16}, + expected: NewOperationV128Splat(ShapeI8x16), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI16x8SplatName, body: splat(wasm.OpcodeVecI16x8Splat), - expected: OperationV128Splat{Shape: ShapeI16x8}, + expected: NewOperationV128Splat(ShapeI16x8), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI32x4SplatName, body: splat(wasm.OpcodeVecI32x4Splat), - expected: OperationV128Splat{Shape: 
ShapeI32x4}, + expected: NewOperationV128Splat(ShapeI32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI64x2SplatName, body: splat(wasm.OpcodeVecI64x2Splat), - expected: OperationV128Splat{Shape: ShapeI64x2}, + expected: NewOperationV128Splat(ShapeI64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF32x4SplatName, body: splat(wasm.OpcodeVecF32x4Splat), - expected: OperationV128Splat{Shape: ShapeF32x4}, + expected: NewOperationV128Splat(ShapeF32x4), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecF64x2SplatName, body: splat(wasm.OpcodeVecF64x2Splat), - expected: OperationV128Splat{Shape: ShapeF64x2}, + expected: NewOperationV128Splat(ShapeF64x2), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecI8x16SwizzleName, body: vv2v(wasm.OpcodeVecI8x16Swizzle), - expected: OperationV128Swizzle{}, + expected: NewOperationV128Swizzle(), needDropBeforeReturn: true, }, { @@ -1763,1018 +1755,922 @@ func TestCompile_Vec(t *testing.T) { wasm.OpcodeDrop, wasm.OpcodeEnd, }, - expected: OperationV128Shuffle{Lanes: [16]byte{ + expected: NewOperationV128Shuffle([]uint64{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - }}, + }), needDropBeforeReturn: true, }, { name: wasm.OpcodeVecV128NotName, body: v2v(wasm.OpcodeVecV128Not), needDropBeforeReturn: true, - expected: OperationV128Not{}, + expected: NewOperationV128Not(), }, { name: wasm.OpcodeVecV128AndName, body: vv2v(wasm.OpcodeVecV128And), needDropBeforeReturn: true, - expected: OperationV128And{}, + expected: NewOperationV128And(), }, { name: wasm.OpcodeVecV128AndNotName, body: vv2v(wasm.OpcodeVecV128AndNot), needDropBeforeReturn: true, - expected: OperationV128AndNot{}, + expected: NewOperationV128AndNot(), }, { name: wasm.OpcodeVecV128OrName, body: vv2v(wasm.OpcodeVecV128Or), needDropBeforeReturn: true, - expected: OperationV128Or{}, + expected: NewOperationV128Or(), }, { name: wasm.OpcodeVecV128XorName, body: vv2v(wasm.OpcodeVecV128Xor), needDropBeforeReturn: true, - expected: OperationV128Xor{}, 
+ expected: NewOperationV128Xor(), }, { name: wasm.OpcodeVecV128BitselectName, body: vvv2v(wasm.OpcodeVecV128Bitselect), needDropBeforeReturn: true, - expected: OperationV128Bitselect{}, + expected: NewOperationV128Bitselect(), }, { name: wasm.OpcodeVecI8x16ShlName, body: vi2v(wasm.OpcodeVecI8x16Shl), needDropBeforeReturn: true, - expected: OperationV128Shl{Shape: ShapeI8x16}, + expected: NewOperationV128Shl(ShapeI8x16), }, { name: wasm.OpcodeVecI8x16ShrSName, body: vi2v(wasm.OpcodeVecI8x16ShrS), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI8x16, Signed: true}, + expected: NewOperationV128Shr(ShapeI8x16, true), }, { name: wasm.OpcodeVecI8x16ShrUName, body: vi2v(wasm.OpcodeVecI8x16ShrU), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI8x16, Signed: false}, + expected: NewOperationV128Shr(ShapeI8x16, false), }, { name: wasm.OpcodeVecI16x8ShlName, body: vi2v(wasm.OpcodeVecI16x8Shl), needDropBeforeReturn: true, - expected: OperationV128Shl{Shape: ShapeI16x8}, + expected: NewOperationV128Shl(ShapeI16x8), }, { name: wasm.OpcodeVecI16x8ShrSName, body: vi2v(wasm.OpcodeVecI16x8ShrS), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI16x8, Signed: true}, + expected: NewOperationV128Shr(ShapeI16x8, true), }, { name: wasm.OpcodeVecI16x8ShrUName, body: vi2v(wasm.OpcodeVecI16x8ShrU), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI16x8, Signed: false}, + expected: NewOperationV128Shr(ShapeI16x8, false), }, { name: wasm.OpcodeVecI32x4ShlName, body: vi2v(wasm.OpcodeVecI32x4Shl), needDropBeforeReturn: true, - expected: OperationV128Shl{Shape: ShapeI32x4}, + expected: NewOperationV128Shl(ShapeI32x4), }, { name: wasm.OpcodeVecI32x4ShrSName, body: vi2v(wasm.OpcodeVecI32x4ShrS), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI32x4, Signed: true}, + expected: NewOperationV128Shr(ShapeI32x4, true), }, { name: wasm.OpcodeVecI32x4ShrUName, body: vi2v(wasm.OpcodeVecI32x4ShrU), 
needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI32x4, Signed: false}, + expected: NewOperationV128Shr(ShapeI32x4, false), }, { name: wasm.OpcodeVecI64x2ShlName, body: vi2v(wasm.OpcodeVecI64x2Shl), needDropBeforeReturn: true, - expected: OperationV128Shl{Shape: ShapeI64x2}, + expected: NewOperationV128Shl(ShapeI64x2), }, { name: wasm.OpcodeVecI64x2ShrSName, body: vi2v(wasm.OpcodeVecI64x2ShrS), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI64x2, Signed: true}, + expected: NewOperationV128Shr(ShapeI64x2, true), }, { name: wasm.OpcodeVecI64x2ShrUName, body: vi2v(wasm.OpcodeVecI64x2ShrU), needDropBeforeReturn: true, - expected: OperationV128Shr{Shape: ShapeI64x2, Signed: false}, + expected: NewOperationV128Shr(ShapeI64x2, false), }, { name: wasm.OpcodeVecI8x16EqName, body: vv2v(wasm.OpcodeVecI8x16Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16Eq}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16Eq), }, { name: wasm.OpcodeVecI8x16NeName, body: vv2v(wasm.OpcodeVecI8x16Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16Ne}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16Ne), }, { name: wasm.OpcodeVecI8x16LtSName, body: vv2v(wasm.OpcodeVecI8x16LtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16LtS}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16LtS), }, { name: wasm.OpcodeVecI8x16LtUName, body: vv2v(wasm.OpcodeVecI8x16LtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16LtU}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16LtU), }, { name: wasm.OpcodeVecI8x16GtSName, body: vv2v(wasm.OpcodeVecI8x16GtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16GtS}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16GtS), }, { name: wasm.OpcodeVecI8x16GtUName, body: vv2v(wasm.OpcodeVecI8x16GtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: 
V128CmpTypeI8x16GtU}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16GtU), }, { name: wasm.OpcodeVecI8x16LeSName, body: vv2v(wasm.OpcodeVecI8x16LeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16LeS}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16LeS), }, { name: wasm.OpcodeVecI8x16LeUName, body: vv2v(wasm.OpcodeVecI8x16LeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16LeU}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16LeU), }, { name: wasm.OpcodeVecI8x16GeSName, body: vv2v(wasm.OpcodeVecI8x16GeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16GeS}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16GeS), }, { name: wasm.OpcodeVecI8x16GeUName, body: vv2v(wasm.OpcodeVecI8x16GeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI8x16GeU}, + expected: NewOperationV128Cmp(V128CmpTypeI8x16GeU), }, { name: wasm.OpcodeVecI16x8EqName, body: vv2v(wasm.OpcodeVecI16x8Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8Eq}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8Eq), }, { name: wasm.OpcodeVecI16x8NeName, body: vv2v(wasm.OpcodeVecI16x8Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8Ne}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8Ne), }, { name: wasm.OpcodeVecI16x8LtSName, body: vv2v(wasm.OpcodeVecI16x8LtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8LtS}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8LtS), }, { name: wasm.OpcodeVecI16x8LtUName, body: vv2v(wasm.OpcodeVecI16x8LtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8LtU}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8LtU), }, { name: wasm.OpcodeVecI16x8GtSName, body: vv2v(wasm.OpcodeVecI16x8GtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8GtS}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8GtS), }, { 
name: wasm.OpcodeVecI16x8GtUName, body: vv2v(wasm.OpcodeVecI16x8GtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8GtU}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8GtU), }, { name: wasm.OpcodeVecI16x8LeSName, body: vv2v(wasm.OpcodeVecI16x8LeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8LeS}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8LeS), }, { name: wasm.OpcodeVecI16x8LeUName, body: vv2v(wasm.OpcodeVecI16x8LeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8LeU}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8LeU), }, { name: wasm.OpcodeVecI16x8GeSName, body: vv2v(wasm.OpcodeVecI16x8GeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8GeS}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8GeS), }, { name: wasm.OpcodeVecI16x8GeUName, body: vv2v(wasm.OpcodeVecI16x8GeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI16x8GeU}, + expected: NewOperationV128Cmp(V128CmpTypeI16x8GeU), }, { name: wasm.OpcodeVecI32x4EqName, body: vv2v(wasm.OpcodeVecI32x4Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4Eq}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4Eq), }, { name: wasm.OpcodeVecI32x4NeName, body: vv2v(wasm.OpcodeVecI32x4Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4Ne}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4Ne), }, { name: wasm.OpcodeVecI32x4LtSName, body: vv2v(wasm.OpcodeVecI32x4LtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4LtS}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4LtS), }, { name: wasm.OpcodeVecI32x4LtUName, body: vv2v(wasm.OpcodeVecI32x4LtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4LtU}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4LtU), }, { name: wasm.OpcodeVecI32x4GtSName, body: vv2v(wasm.OpcodeVecI32x4GtS), 
needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4GtS}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4GtS), }, { name: wasm.OpcodeVecI32x4GtUName, body: vv2v(wasm.OpcodeVecI32x4GtU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4GtU}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4GtU), }, { name: wasm.OpcodeVecI32x4LeSName, body: vv2v(wasm.OpcodeVecI32x4LeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4LeS}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4LeS), }, { name: wasm.OpcodeVecI32x4LeUName, body: vv2v(wasm.OpcodeVecI32x4LeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4LeU}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4LeU), }, { name: wasm.OpcodeVecI32x4GeSName, body: vv2v(wasm.OpcodeVecI32x4GeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4GeS}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4GeS), }, { name: wasm.OpcodeVecI32x4GeUName, body: vv2v(wasm.OpcodeVecI32x4GeU), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI32x4GeU}, + expected: NewOperationV128Cmp(V128CmpTypeI32x4GeU), }, { name: wasm.OpcodeVecI64x2EqName, body: vv2v(wasm.OpcodeVecI64x2Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI64x2Eq}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2Eq), }, { name: wasm.OpcodeVecI64x2NeName, body: vv2v(wasm.OpcodeVecI64x2Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI64x2Ne}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2Ne), }, { name: wasm.OpcodeVecI64x2LtSName, body: vv2v(wasm.OpcodeVecI64x2LtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI64x2LtS}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2LtS), }, { name: wasm.OpcodeVecI64x2GtSName, body: vv2v(wasm.OpcodeVecI64x2GtS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: 
V128CmpTypeI64x2GtS}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2GtS), }, { name: wasm.OpcodeVecI64x2LeSName, body: vv2v(wasm.OpcodeVecI64x2LeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI64x2LeS}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2LeS), }, { name: wasm.OpcodeVecI64x2GeSName, body: vv2v(wasm.OpcodeVecI64x2GeS), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeI64x2GeS}, + expected: NewOperationV128Cmp(V128CmpTypeI64x2GeS), }, { name: wasm.OpcodeVecF32x4EqName, body: vv2v(wasm.OpcodeVecF32x4Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Eq}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Eq), }, { name: wasm.OpcodeVecF32x4NeName, body: vv2v(wasm.OpcodeVecF32x4Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Ne}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Ne), }, { name: wasm.OpcodeVecF32x4LtName, body: vv2v(wasm.OpcodeVecF32x4Lt), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Lt}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Lt), }, { name: wasm.OpcodeVecF32x4GtName, body: vv2v(wasm.OpcodeVecF32x4Gt), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Gt}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Gt), }, { name: wasm.OpcodeVecF32x4LeName, body: vv2v(wasm.OpcodeVecF32x4Le), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Le}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Le), }, { name: wasm.OpcodeVecF32x4GeName, body: vv2v(wasm.OpcodeVecF32x4Ge), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF32x4Ge}, + expected: NewOperationV128Cmp(V128CmpTypeF32x4Ge), }, { name: wasm.OpcodeVecF64x2EqName, body: vv2v(wasm.OpcodeVecF64x2Eq), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Eq}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Eq), }, { name: 
wasm.OpcodeVecF64x2NeName, body: vv2v(wasm.OpcodeVecF64x2Ne), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Ne}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Ne), }, { name: wasm.OpcodeVecF64x2LtName, body: vv2v(wasm.OpcodeVecF64x2Lt), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Lt}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Lt), }, { name: wasm.OpcodeVecF64x2GtName, body: vv2v(wasm.OpcodeVecF64x2Gt), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Gt}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Gt), }, { name: wasm.OpcodeVecF64x2LeName, body: vv2v(wasm.OpcodeVecF64x2Le), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Le}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Le), }, { name: wasm.OpcodeVecF64x2GeName, body: vv2v(wasm.OpcodeVecF64x2Ge), needDropBeforeReturn: true, - expected: OperationV128Cmp{Type: V128CmpTypeF64x2Ge}, + expected: NewOperationV128Cmp(V128CmpTypeF64x2Ge), }, { name: wasm.OpcodeVecI8x16AllTrueName, body: v2v(wasm.OpcodeVecI8x16AllTrue), needDropBeforeReturn: true, - expected: OperationV128AllTrue{Shape: ShapeI8x16}, + expected: NewOperationV128AllTrue(ShapeI8x16), }, { name: wasm.OpcodeVecI16x8AllTrueName, body: v2v(wasm.OpcodeVecI16x8AllTrue), needDropBeforeReturn: true, - expected: OperationV128AllTrue{Shape: ShapeI16x8}, + expected: NewOperationV128AllTrue(ShapeI16x8), }, { name: wasm.OpcodeVecI32x4AllTrueName, body: v2v(wasm.OpcodeVecI32x4AllTrue), needDropBeforeReturn: true, - expected: OperationV128AllTrue{Shape: ShapeI32x4}, + expected: NewOperationV128AllTrue(ShapeI32x4), }, { name: wasm.OpcodeVecI64x2AllTrueName, body: v2v(wasm.OpcodeVecI64x2AllTrue), needDropBeforeReturn: true, - expected: OperationV128AllTrue{Shape: ShapeI64x2}, + expected: NewOperationV128AllTrue(ShapeI64x2), }, { name: wasm.OpcodeVecI8x16BitMaskName, body: v2v(wasm.OpcodeVecI8x16BitMask), - needDropBeforeReturn: true, 
expected: OperationV128BitMask{Shape: ShapeI8x16}, + needDropBeforeReturn: true, expected: NewOperationV128BitMask(ShapeI8x16), }, { name: wasm.OpcodeVecI16x8BitMaskName, body: v2v(wasm.OpcodeVecI16x8BitMask), - needDropBeforeReturn: true, expected: OperationV128BitMask{Shape: ShapeI16x8}, + needDropBeforeReturn: true, expected: NewOperationV128BitMask(ShapeI16x8), }, { name: wasm.OpcodeVecI32x4BitMaskName, body: v2v(wasm.OpcodeVecI32x4BitMask), - needDropBeforeReturn: true, expected: OperationV128BitMask{Shape: ShapeI32x4}, + needDropBeforeReturn: true, expected: NewOperationV128BitMask(ShapeI32x4), }, { name: wasm.OpcodeVecI64x2BitMaskName, body: v2v(wasm.OpcodeVecI64x2BitMask), - needDropBeforeReturn: true, expected: OperationV128BitMask{Shape: ShapeI64x2}, + needDropBeforeReturn: true, expected: NewOperationV128BitMask(ShapeI64x2), }, { name: wasm.OpcodeVecV128AnyTrueName, body: v2v(wasm.OpcodeVecV128AnyTrue), needDropBeforeReturn: true, - expected: OperationV128AnyTrue{}, + expected: NewOperationV128AnyTrue(), }, { name: wasm.OpcodeVecI8x16AddName, body: vv2v(wasm.OpcodeVecI8x16Add), needDropBeforeReturn: true, - expected: OperationV128Add{Shape: ShapeI8x16}, + expected: NewOperationV128Add(ShapeI8x16), }, { name: wasm.OpcodeVecI8x16AddSatSName, body: vv2v(wasm.OpcodeVecI8x16AddSatS), needDropBeforeReturn: true, - expected: OperationV128AddSat{Shape: ShapeI8x16, Signed: true}, + expected: NewOperationV128AddSat(ShapeI8x16, true), }, { name: wasm.OpcodeVecI8x16AddSatUName, body: vv2v(wasm.OpcodeVecI8x16AddSatU), needDropBeforeReturn: true, - expected: OperationV128AddSat{Shape: ShapeI8x16, Signed: false}, + expected: NewOperationV128AddSat(ShapeI8x16, false), }, { name: wasm.OpcodeVecI8x16SubName, body: vv2v(wasm.OpcodeVecI8x16Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeI8x16}, + expected: NewOperationV128Sub(ShapeI8x16), }, { name: wasm.OpcodeVecI8x16SubSatSName, body: vv2v(wasm.OpcodeVecI8x16SubSatS), needDropBeforeReturn: true, 
- expected: OperationV128SubSat{Shape: ShapeI8x16, Signed: true}, + expected: NewOperationV128SubSat(ShapeI8x16, true), }, { name: wasm.OpcodeVecI8x16SubSatUName, body: vv2v(wasm.OpcodeVecI8x16SubSatU), needDropBeforeReturn: true, - expected: OperationV128SubSat{Shape: ShapeI8x16, Signed: false}, + expected: NewOperationV128SubSat(ShapeI8x16, false), }, { name: wasm.OpcodeVecI16x8AddName, body: vv2v(wasm.OpcodeVecI16x8Add), needDropBeforeReturn: true, - expected: OperationV128Add{Shape: ShapeI16x8}, + expected: NewOperationV128Add(ShapeI16x8), }, { name: wasm.OpcodeVecI16x8AddSatSName, body: vv2v(wasm.OpcodeVecI16x8AddSatS), needDropBeforeReturn: true, - expected: OperationV128AddSat{Shape: ShapeI16x8, Signed: true}, + expected: NewOperationV128AddSat(ShapeI16x8, true), }, { name: wasm.OpcodeVecI16x8AddSatUName, body: vv2v(wasm.OpcodeVecI16x8AddSatU), needDropBeforeReturn: true, - expected: OperationV128AddSat{Shape: ShapeI16x8, Signed: false}, + expected: NewOperationV128AddSat(ShapeI16x8, false), }, { name: wasm.OpcodeVecI16x8SubName, body: vv2v(wasm.OpcodeVecI16x8Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeI16x8}, + expected: NewOperationV128Sub(ShapeI16x8), }, { name: wasm.OpcodeVecI16x8SubSatSName, body: vv2v(wasm.OpcodeVecI16x8SubSatS), needDropBeforeReturn: true, - expected: OperationV128SubSat{Shape: ShapeI16x8, Signed: true}, + expected: NewOperationV128SubSat(ShapeI16x8, true), }, { name: wasm.OpcodeVecI16x8SubSatUName, body: vv2v(wasm.OpcodeVecI16x8SubSatU), needDropBeforeReturn: true, - expected: OperationV128SubSat{Shape: ShapeI16x8, Signed: false}, + expected: NewOperationV128SubSat(ShapeI16x8, false), }, { name: wasm.OpcodeVecI16x8MulName, body: vv2v(wasm.OpcodeVecI16x8Mul), needDropBeforeReturn: true, - expected: OperationV128Mul{Shape: ShapeI16x8}, + expected: NewOperationV128Mul(ShapeI16x8), }, { name: wasm.OpcodeVecI32x4AddName, body: vv2v(wasm.OpcodeVecI32x4Add), needDropBeforeReturn: true, - expected: 
OperationV128Add{Shape: ShapeI32x4}, + expected: NewOperationV128Add(ShapeI32x4), }, { name: wasm.OpcodeVecI32x4SubName, body: vv2v(wasm.OpcodeVecI32x4Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeI32x4}, + expected: NewOperationV128Sub(ShapeI32x4), }, { name: wasm.OpcodeVecI32x4MulName, body: vv2v(wasm.OpcodeVecI32x4Mul), needDropBeforeReturn: true, - expected: OperationV128Mul{Shape: ShapeI32x4}, + expected: NewOperationV128Mul(ShapeI32x4), }, { name: wasm.OpcodeVecI64x2AddName, body: vv2v(wasm.OpcodeVecI64x2Add), needDropBeforeReturn: true, - expected: OperationV128Add{Shape: ShapeI64x2}, + expected: NewOperationV128Add(ShapeI64x2), }, { name: wasm.OpcodeVecI64x2SubName, body: vv2v(wasm.OpcodeVecI64x2Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeI64x2}, + expected: NewOperationV128Sub(ShapeI64x2), }, { name: wasm.OpcodeVecI64x2MulName, body: vv2v(wasm.OpcodeVecI64x2Mul), needDropBeforeReturn: true, - expected: OperationV128Mul{Shape: ShapeI64x2}, + expected: NewOperationV128Mul(ShapeI64x2), }, { name: wasm.OpcodeVecF32x4AddName, body: vv2v(wasm.OpcodeVecF32x4Add), needDropBeforeReturn: true, - expected: OperationV128Add{Shape: ShapeF32x4}, + expected: NewOperationV128Add(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4SubName, body: vv2v(wasm.OpcodeVecF32x4Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeF32x4}, + expected: NewOperationV128Sub(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4MulName, body: vv2v(wasm.OpcodeVecF32x4Mul), needDropBeforeReturn: true, - expected: OperationV128Mul{Shape: ShapeF32x4}, + expected: NewOperationV128Mul(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4DivName, body: vv2v(wasm.OpcodeVecF32x4Div), needDropBeforeReturn: true, - expected: OperationV128Div{Shape: ShapeF32x4}, + expected: NewOperationV128Div(ShapeF32x4), }, { name: wasm.OpcodeVecF64x2AddName, body: vv2v(wasm.OpcodeVecF64x2Add), needDropBeforeReturn: true, - expected: OperationV128Add{Shape: 
ShapeF64x2}, + expected: NewOperationV128Add(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2SubName, body: vv2v(wasm.OpcodeVecF64x2Sub), needDropBeforeReturn: true, - expected: OperationV128Sub{Shape: ShapeF64x2}, + expected: NewOperationV128Sub(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2MulName, body: vv2v(wasm.OpcodeVecF64x2Mul), needDropBeforeReturn: true, - expected: OperationV128Mul{Shape: ShapeF64x2}, + expected: NewOperationV128Mul(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2DivName, body: vv2v(wasm.OpcodeVecF64x2Div), needDropBeforeReturn: true, - expected: OperationV128Div{Shape: ShapeF64x2}, + expected: NewOperationV128Div(ShapeF64x2), }, { name: wasm.OpcodeVecI8x16MinSName, body: vv2v(wasm.OpcodeVecI8x16MinS), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI8x16, Signed: true}, + expected: NewOperationV128Min(ShapeI8x16, true), }, { name: wasm.OpcodeVecI8x16MinUName, body: vv2v(wasm.OpcodeVecI8x16MinU), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI8x16}, + expected: NewOperationV128Min(ShapeI8x16, false), }, { name: wasm.OpcodeVecI8x16MaxSName, body: vv2v(wasm.OpcodeVecI8x16MaxS), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI8x16, Signed: true}, + expected: NewOperationV128Max(ShapeI8x16, true), }, { name: wasm.OpcodeVecI8x16MaxUName, body: vv2v(wasm.OpcodeVecI8x16MaxU), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI8x16}, + expected: NewOperationV128Max(ShapeI8x16, false), }, { name: wasm.OpcodeVecI8x16AvgrUName, body: vv2v(wasm.OpcodeVecI8x16AvgrU), needDropBeforeReturn: true, - expected: OperationV128AvgrU{Shape: ShapeI8x16}, + expected: NewOperationV128AvgrU(ShapeI8x16), }, { name: wasm.OpcodeVecI16x8MinSName, body: vv2v(wasm.OpcodeVecI16x8MinS), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI16x8, Signed: true}, + expected: NewOperationV128Min(ShapeI16x8, true), }, { name: wasm.OpcodeVecI16x8MinUName, body: vv2v(wasm.OpcodeVecI16x8MinU), 
needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI16x8}, + expected: NewOperationV128Min(ShapeI16x8, false), }, { name: wasm.OpcodeVecI16x8MaxSName, body: vv2v(wasm.OpcodeVecI16x8MaxS), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI16x8, Signed: true}, + expected: NewOperationV128Max(ShapeI16x8, true), }, { name: wasm.OpcodeVecI16x8MaxUName, body: vv2v(wasm.OpcodeVecI16x8MaxU), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI16x8}, + expected: NewOperationV128Max(ShapeI16x8, false), }, { name: wasm.OpcodeVecI16x8AvgrUName, body: vv2v(wasm.OpcodeVecI16x8AvgrU), needDropBeforeReturn: true, - expected: OperationV128AvgrU{Shape: ShapeI16x8}, + expected: NewOperationV128AvgrU(ShapeI16x8), }, { name: wasm.OpcodeVecI32x4MinSName, body: vv2v(wasm.OpcodeVecI32x4MinS), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI32x4, Signed: true}, + expected: NewOperationV128Min(ShapeI32x4, true), }, { name: wasm.OpcodeVecI32x4MinUName, body: vv2v(wasm.OpcodeVecI32x4MinU), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeI32x4}, + expected: NewOperationV128Min(ShapeI32x4, false), }, { name: wasm.OpcodeVecI32x4MaxSName, body: vv2v(wasm.OpcodeVecI32x4MaxS), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI32x4, Signed: true}, + expected: NewOperationV128Max(ShapeI32x4, true), }, { name: wasm.OpcodeVecI32x4MaxUName, body: vv2v(wasm.OpcodeVecI32x4MaxU), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeI32x4}, + expected: NewOperationV128Max(ShapeI32x4, false), }, { name: wasm.OpcodeVecF32x4MinName, body: vv2v(wasm.OpcodeVecF32x4Min), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeF32x4}, + expected: NewOperationV128Min(ShapeF32x4, false), }, { name: wasm.OpcodeVecF32x4MaxName, body: vv2v(wasm.OpcodeVecF32x4Max), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeF32x4}, + expected: 
NewOperationV128Max(ShapeF32x4, false), }, { name: wasm.OpcodeVecF64x2MinName, body: vv2v(wasm.OpcodeVecF64x2Min), needDropBeforeReturn: true, - expected: OperationV128Min{Shape: ShapeF64x2}, + expected: NewOperationV128Min(ShapeF64x2, false), }, { name: wasm.OpcodeVecF64x2MaxName, body: vv2v(wasm.OpcodeVecF64x2Max), needDropBeforeReturn: true, - expected: OperationV128Max{Shape: ShapeF64x2}, + expected: NewOperationV128Max(ShapeF64x2, false), }, { name: wasm.OpcodeVecI8x16AbsName, body: v2v(wasm.OpcodeVecI8x16Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeI8x16}, + expected: NewOperationV128Abs(ShapeI8x16), }, { name: wasm.OpcodeVecI8x16PopcntName, body: v2v(wasm.OpcodeVecI8x16Popcnt), needDropBeforeReturn: true, - expected: OperationV128Popcnt{}, + expected: NewOperationV128Popcnt(ShapeI8x16), }, { name: wasm.OpcodeVecI16x8AbsName, body: v2v(wasm.OpcodeVecI16x8Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeI16x8}, + expected: NewOperationV128Abs(ShapeI16x8), }, { name: wasm.OpcodeVecI32x4AbsName, body: v2v(wasm.OpcodeVecI32x4Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeI32x4}, + expected: NewOperationV128Abs(ShapeI32x4), }, { name: wasm.OpcodeVecI64x2AbsName, body: v2v(wasm.OpcodeVecI64x2Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeI64x2}, + expected: NewOperationV128Abs(ShapeI64x2), }, { name: wasm.OpcodeVecF32x4AbsName, body: v2v(wasm.OpcodeVecF32x4Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeF32x4}, + expected: NewOperationV128Abs(ShapeF32x4), }, { name: wasm.OpcodeVecF64x2AbsName, body: v2v(wasm.OpcodeVecF64x2Abs), needDropBeforeReturn: true, - expected: OperationV128Abs{Shape: ShapeF64x2}, + expected: NewOperationV128Abs(ShapeF64x2), }, { name: wasm.OpcodeVecF32x4CeilName, body: v2v(wasm.OpcodeVecF32x4Ceil), needDropBeforeReturn: true, - expected: OperationV128Ceil{Shape: ShapeF32x4}, + expected: 
NewOperationV128Ceil(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4FloorName, body: v2v(wasm.OpcodeVecF32x4Floor), needDropBeforeReturn: true, - expected: OperationV128Floor{Shape: ShapeF32x4}, + expected: NewOperationV128Floor(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4TruncName, body: v2v(wasm.OpcodeVecF32x4Trunc), needDropBeforeReturn: true, - expected: OperationV128Trunc{Shape: ShapeF32x4}, + expected: NewOperationV128Trunc(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4NearestName, body: v2v(wasm.OpcodeVecF32x4Nearest), needDropBeforeReturn: true, - expected: OperationV128Nearest{Shape: ShapeF32x4}, + expected: NewOperationV128Nearest(ShapeF32x4), }, { name: wasm.OpcodeVecF64x2CeilName, body: v2v(wasm.OpcodeVecF64x2Ceil), needDropBeforeReturn: true, - expected: OperationV128Ceil{Shape: ShapeF64x2}, + expected: NewOperationV128Ceil(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2FloorName, body: v2v(wasm.OpcodeVecF64x2Floor), needDropBeforeReturn: true, - expected: OperationV128Floor{Shape: ShapeF64x2}, + expected: NewOperationV128Floor(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2TruncName, body: v2v(wasm.OpcodeVecF64x2Trunc), needDropBeforeReturn: true, - expected: OperationV128Trunc{Shape: ShapeF64x2}, + expected: NewOperationV128Trunc(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2NearestName, body: v2v(wasm.OpcodeVecF64x2Nearest), needDropBeforeReturn: true, - expected: OperationV128Nearest{Shape: ShapeF64x2}, + expected: NewOperationV128Nearest(ShapeF64x2), }, { name: wasm.OpcodeVecF32x4PminName, body: vv2v(wasm.OpcodeVecF32x4Pmin), needDropBeforeReturn: true, - expected: OperationV128Pmin{Shape: ShapeF32x4}, + expected: NewOperationV128Pmin(ShapeF32x4), }, { name: wasm.OpcodeVecF32x4PmaxName, body: vv2v(wasm.OpcodeVecF32x4Pmax), needDropBeforeReturn: true, - expected: OperationV128Pmax{Shape: ShapeF32x4}, + expected: NewOperationV128Pmax(ShapeF32x4), }, { name: wasm.OpcodeVecF64x2PminName, body: vv2v(wasm.OpcodeVecF64x2Pmin), needDropBeforeReturn: true, - expected: 
OperationV128Pmin{Shape: ShapeF64x2}, + expected: NewOperationV128Pmin(ShapeF64x2), }, { name: wasm.OpcodeVecF64x2PmaxName, body: vv2v(wasm.OpcodeVecF64x2Pmax), needDropBeforeReturn: true, - expected: OperationV128Pmax{Shape: ShapeF64x2}, + expected: NewOperationV128Pmax(ShapeF64x2), }, { name: wasm.OpcodeVecI16x8Q15mulrSatSName, body: vv2v(wasm.OpcodeVecI16x8Q15mulrSatS), needDropBeforeReturn: true, - expected: OperationV128Q15mulrSatS{}, + expected: NewOperationV128Q15mulrSatS(), }, { name: wasm.OpcodeVecI16x8ExtMulLowI8x16SName, body: vv2v(wasm.OpcodeVecI16x8ExtMulLowI8x16S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI8x16, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI8x16, true, true), }, { name: wasm.OpcodeVecI16x8ExtMulHighI8x16SName, body: vv2v(wasm.OpcodeVecI16x8ExtMulHighI8x16S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI8x16, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI8x16, true, false), }, { name: wasm.OpcodeVecI16x8ExtMulLowI8x16UName, body: vv2v(wasm.OpcodeVecI16x8ExtMulLowI8x16U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI8x16, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI8x16, false, true), }, { name: wasm.OpcodeVecI16x8ExtMulHighI8x16UName, body: vv2v(wasm.OpcodeVecI16x8ExtMulHighI8x16U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI8x16, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI8x16, false, false), }, { name: wasm.OpcodeVecI32x4ExtMulLowI16x8SName, body: vv2v(wasm.OpcodeVecI32x4ExtMulLowI16x8S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI16x8, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI16x8, true, true), }, { name: wasm.OpcodeVecI32x4ExtMulHighI16x8SName, body: 
vv2v(wasm.OpcodeVecI32x4ExtMulHighI16x8S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI16x8, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI16x8, true, false), }, { name: wasm.OpcodeVecI32x4ExtMulLowI16x8UName, body: vv2v(wasm.OpcodeVecI32x4ExtMulLowI16x8U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI16x8, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI16x8, false, true), }, { name: wasm.OpcodeVecI32x4ExtMulHighI16x8UName, body: vv2v(wasm.OpcodeVecI32x4ExtMulHighI16x8U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI16x8, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI16x8, false, false), }, { name: wasm.OpcodeVecI64x2ExtMulLowI32x4SName, body: vv2v(wasm.OpcodeVecI64x2ExtMulLowI32x4S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI32x4, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI32x4, true, true), }, { name: wasm.OpcodeVecI64x2ExtMulHighI32x4SName, body: vv2v(wasm.OpcodeVecI64x2ExtMulHighI32x4S), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI32x4, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI32x4, true, false), }, { name: wasm.OpcodeVecI64x2ExtMulLowI32x4UName, body: vv2v(wasm.OpcodeVecI64x2ExtMulLowI32x4U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI32x4, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128ExtMul(ShapeI32x4, false, true), }, { name: wasm.OpcodeVecI64x2ExtMulHighI32x4UName, body: vv2v(wasm.OpcodeVecI64x2ExtMulHighI32x4U), needDropBeforeReturn: true, - expected: OperationV128ExtMul{ - OriginShape: ShapeI32x4, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128ExtMul(ShapeI32x4, false, false), }, { name: 
wasm.OpcodeVecI16x8ExtendLowI8x16SName, body: v2v(wasm.OpcodeVecI16x8ExtendLowI8x16S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI8x16, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI8x16, true, true), }, { name: wasm.OpcodeVecI16x8ExtendHighI8x16SName, body: v2v(wasm.OpcodeVecI16x8ExtendHighI8x16S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI8x16, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI8x16, true, false), }, { name: wasm.OpcodeVecI16x8ExtendLowI8x16UName, body: v2v(wasm.OpcodeVecI16x8ExtendLowI8x16U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI8x16, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI8x16, false, true), }, { name: wasm.OpcodeVecI16x8ExtendHighI8x16UName, body: v2v(wasm.OpcodeVecI16x8ExtendHighI8x16U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI8x16, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI8x16, false, false), }, { name: wasm.OpcodeVecI32x4ExtendLowI16x8SName, body: v2v(wasm.OpcodeVecI32x4ExtendLowI16x8S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI16x8, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI16x8, true, true), }, { name: wasm.OpcodeVecI32x4ExtendHighI16x8SName, body: v2v(wasm.OpcodeVecI32x4ExtendHighI16x8S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI16x8, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI16x8, true, false), }, { name: wasm.OpcodeVecI32x4ExtendLowI16x8UName, body: v2v(wasm.OpcodeVecI32x4ExtendLowI16x8U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI16x8, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI16x8, false, true), }, { name: 
wasm.OpcodeVecI32x4ExtendHighI16x8UName, body: v2v(wasm.OpcodeVecI32x4ExtendHighI16x8U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI16x8, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI16x8, false, false), }, { name: wasm.OpcodeVecI64x2ExtendLowI32x4SName, body: v2v(wasm.OpcodeVecI64x2ExtendLowI32x4S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI32x4, - Signed: true, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI32x4, true, true), }, { name: wasm.OpcodeVecI64x2ExtendHighI32x4SName, body: v2v(wasm.OpcodeVecI64x2ExtendHighI32x4S), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI32x4, - Signed: true, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI32x4, true, false), }, { name: wasm.OpcodeVecI64x2ExtendLowI32x4UName, body: v2v(wasm.OpcodeVecI64x2ExtendLowI32x4U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI32x4, - Signed: false, - UseLow: true, - }, + expected: NewOperationV128Extend(ShapeI32x4, false, true), }, { name: wasm.OpcodeVecI64x2ExtendHighI32x4UName, body: v2v(wasm.OpcodeVecI64x2ExtendHighI32x4U), needDropBeforeReturn: true, - expected: OperationV128Extend{ - OriginShape: ShapeI32x4, - Signed: false, - UseLow: false, - }, + expected: NewOperationV128Extend(ShapeI32x4, false, false), }, { name: wasm.OpcodeVecI16x8ExtaddPairwiseI8x16SName, body: v2v(wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S), needDropBeforeReturn: true, - expected: OperationV128ExtAddPairwise{OriginShape: ShapeI8x16, Signed: true}, + expected: NewOperationV128ExtAddPairwise(ShapeI8x16, true), }, { name: wasm.OpcodeVecI16x8ExtaddPairwiseI8x16UName, body: v2v(wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U), needDropBeforeReturn: true, - expected: OperationV128ExtAddPairwise{OriginShape: ShapeI8x16, Signed: false}, + expected: NewOperationV128ExtAddPairwise(ShapeI8x16, false), }, { name: 
wasm.OpcodeVecI32x4ExtaddPairwiseI16x8SName, body: v2v(wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S), needDropBeforeReturn: true, - expected: OperationV128ExtAddPairwise{OriginShape: ShapeI16x8, Signed: true}, + expected: NewOperationV128ExtAddPairwise(ShapeI16x8, true), }, { name: wasm.OpcodeVecI32x4ExtaddPairwiseI16x8UName, body: v2v(wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U), needDropBeforeReturn: true, - expected: OperationV128ExtAddPairwise{OriginShape: ShapeI16x8, Signed: false}, + expected: NewOperationV128ExtAddPairwise(ShapeI16x8, false), }, { name: wasm.OpcodeVecF64x2PromoteLowF32x4ZeroName, body: v2v(wasm.OpcodeVecF64x2PromoteLowF32x4Zero), needDropBeforeReturn: true, - expected: OperationV128FloatPromote{}, + expected: NewOperationV128FloatPromote(), }, { name: wasm.OpcodeVecF32x4DemoteF64x2ZeroName, body: v2v(wasm.OpcodeVecF32x4DemoteF64x2Zero), needDropBeforeReturn: true, - expected: OperationV128FloatDemote{}, + expected: NewOperationV128FloatDemote(), }, { name: wasm.OpcodeVecF32x4ConvertI32x4SName, body: v2v(wasm.OpcodeVecF32x4ConvertI32x4S), needDropBeforeReturn: true, - expected: OperationV128FConvertFromI{DestinationShape: ShapeF32x4, Signed: true}, + expected: NewOperationV128FConvertFromI(ShapeF32x4, true), }, { name: wasm.OpcodeVecF32x4ConvertI32x4UName, body: v2v(wasm.OpcodeVecF32x4ConvertI32x4U), needDropBeforeReturn: true, - expected: OperationV128FConvertFromI{DestinationShape: ShapeF32x4, Signed: false}, + expected: NewOperationV128FConvertFromI(ShapeF32x4, false), }, { name: wasm.OpcodeVecF64x2ConvertLowI32x4SName, body: v2v(wasm.OpcodeVecF64x2ConvertLowI32x4S), needDropBeforeReturn: true, - expected: OperationV128FConvertFromI{DestinationShape: ShapeF64x2, Signed: true}, + expected: NewOperationV128FConvertFromI(ShapeF64x2, true), }, { name: wasm.OpcodeVecF64x2ConvertLowI32x4UName, body: v2v(wasm.OpcodeVecF64x2ConvertLowI32x4U), needDropBeforeReturn: true, - expected: OperationV128FConvertFromI{DestinationShape: ShapeF64x2, Signed: false}, + 
expected: NewOperationV128FConvertFromI(ShapeF64x2, false), }, { name: wasm.OpcodeVecI32x4DotI16x8SName, body: vv2v(wasm.OpcodeVecI32x4DotI16x8S), needDropBeforeReturn: true, - expected: OperationV128Dot{}, + expected: NewOperationV128Dot(), }, { name: wasm.OpcodeVecI8x16NarrowI16x8SName, body: vv2v(wasm.OpcodeVecI8x16NarrowI16x8S), needDropBeforeReturn: true, - expected: OperationV128Narrow{OriginShape: ShapeI16x8, Signed: true}, + expected: NewOperationV128Narrow(ShapeI16x8, true), }, { name: wasm.OpcodeVecI8x16NarrowI16x8UName, body: vv2v(wasm.OpcodeVecI8x16NarrowI16x8U), needDropBeforeReturn: true, - expected: OperationV128Narrow{OriginShape: ShapeI16x8, Signed: false}, + expected: NewOperationV128Narrow(ShapeI16x8, false), }, { name: wasm.OpcodeVecI16x8NarrowI32x4SName, body: vv2v(wasm.OpcodeVecI16x8NarrowI32x4S), needDropBeforeReturn: true, - expected: OperationV128Narrow{OriginShape: ShapeI32x4, Signed: true}, + expected: NewOperationV128Narrow(ShapeI32x4, true), }, { name: wasm.OpcodeVecI16x8NarrowI32x4UName, body: vv2v(wasm.OpcodeVecI16x8NarrowI32x4U), needDropBeforeReturn: true, - expected: OperationV128Narrow{OriginShape: ShapeI32x4, Signed: false}, + expected: NewOperationV128Narrow(ShapeI32x4, false), }, { name: wasm.OpcodeVecI32x4TruncSatF32x4SName, body: v2v(wasm.OpcodeVecI32x4TruncSatF32x4S), needDropBeforeReturn: true, - expected: OperationV128ITruncSatFromF{OriginShape: ShapeF32x4, Signed: true}, + expected: NewOperationV128ITruncSatFromF(ShapeF32x4, true), }, { name: wasm.OpcodeVecI32x4TruncSatF32x4UName, body: v2v(wasm.OpcodeVecI32x4TruncSatF32x4U), needDropBeforeReturn: true, - expected: OperationV128ITruncSatFromF{OriginShape: ShapeF32x4, Signed: false}, + expected: NewOperationV128ITruncSatFromF(ShapeF32x4, false), }, { name: wasm.OpcodeVecI32x4TruncSatF64x2SZeroName, body: v2v(wasm.OpcodeVecI32x4TruncSatF64x2SZero), needDropBeforeReturn: true, - expected: OperationV128ITruncSatFromF{OriginShape: ShapeF64x2, Signed: true}, + expected: 
NewOperationV128ITruncSatFromF(ShapeF64x2, true), }, { name: wasm.OpcodeVecI32x4TruncSatF64x2UZeroName, body: v2v(wasm.OpcodeVecI32x4TruncSatF64x2UZero), needDropBeforeReturn: true, - expected: OperationV128ITruncSatFromF{OriginShape: ShapeF64x2, Signed: false}, + expected: NewOperationV128ITruncSatFromF(ShapeF64x2, false), }, } @@ -2790,7 +2686,7 @@ func TestCompile_Vec(t *testing.T) { res, err := CompileFunctions(api.CoreFeaturesV2, 0, module, false) require.NoError(t, err) - var actual Operation + var actual UnionOperation if tc.needDropBeforeReturn { // If the drop operation is inserted, the target op exits at -3 // as the operations looks like: [... target, drop, br(to return)]. @@ -2811,7 +2707,7 @@ func TestCompile_unreachable_Br_BrIf_BrTable(t *testing.T) { tests := []struct { name string mod *wasm.Module - expected []Operation + expected []UnionOperation }{ { name: "br", @@ -2826,7 +2722,7 @@ func TestCompile_unreachable_Br_BrIf_BrTable(t *testing.T) { wasm.OpcodeEnd, // End the function. }}}, }, - expected: []Operation{OperationBr{Target: Label{Kind: LabelKindReturn}}}, + expected: []UnionOperation{NewOperationBr(NewLabel(LabelKindReturn, 0))}, }, { name: "br_if", @@ -2842,7 +2738,7 @@ func TestCompile_unreachable_Br_BrIf_BrTable(t *testing.T) { wasm.OpcodeEnd, // End the function. }}}, }, - expected: []Operation{OperationBr{Target: Label{Kind: LabelKindReturn}}}, + expected: []UnionOperation{NewOperationBr(NewLabel(LabelKindReturn, 0))}, }, { name: "br_table", @@ -2857,7 +2753,7 @@ func TestCompile_unreachable_Br_BrIf_BrTable(t *testing.T) { wasm.OpcodeEnd, // End the function. 
}}}, }, - expected: []Operation{OperationBr{Target: Label{Kind: LabelKindReturn}}}, + expected: []UnionOperation{NewOperationBr(NewLabel(LabelKindReturn, 0))}, }, } @@ -2877,7 +2773,7 @@ func TestCompile_drop_vectors(t *testing.T) { tests := []struct { name string mod *wasm.Module - expected []Operation + expected []UnionOperation }{ { name: "basic", @@ -2891,12 +2787,12 @@ func TestCompile_drop_vectors(t *testing.T) { wasm.OpcodeEnd, }}}, }, - expected: []Operation{ - OperationV128Const{Lo: 0x1, Hi: 0x2}, + expected: []UnionOperation{ + NewOperationV128Const(0x1, 0x2), // InclusiveRange is the range in uint64 representation, so dropping a vector value on top // should be translated as drop [0..1] inclusively. - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, }, } @@ -2915,7 +2811,7 @@ func TestCompile_select_vectors(t *testing.T) { tests := []struct { name string mod *wasm.Module - expected []Operation + expected []UnionOperation }{ { name: "non typed", @@ -2934,13 +2830,13 @@ func TestCompile_select_vectors(t *testing.T) { }}}, FunctionDefinitionSection: []wasm.FunctionDefinition{{}}, }, - expected: []Operation{ - OperationV128Const{Lo: 0x1, Hi: 0x2}, - OperationV128Const{Lo: 0x3, Hi: 0x4}, + expected: []UnionOperation{ + NewOperationV128Const(0x1, 0x2), + NewOperationV128Const(0x3, 0x4), NewOperationConstI32(0), NewOperationSelect(true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, }, { @@ -2960,13 +2856,13 @@ func TestCompile_select_vectors(t *testing.T) { }}}, FunctionDefinitionSection: []wasm.FunctionDefinition{{}}, }, - expected: []Operation{ - OperationV128Const{Lo: 0x1, Hi: 0x2}, - OperationV128Const{Lo: 0x3, Hi: 0x4}, + 
expected: []UnionOperation{ + NewOperationV128Const(0x1, 0x2), + NewOperationV128Const(0x3, 0x4), NewOperationConstI32(0), NewOperationSelect(true), - OperationDrop{Depth: &InclusiveRange{Start: 0, End: 1}}, - OperationBr{Target: Label{Kind: LabelKindReturn}}, + NewOperationDrop(&InclusiveRange{Start: 0, End: 1}), + NewOperationBr(NewLabel(LabelKindReturn, 0)), }, }, } diff --git a/internal/wazeroir/format.go b/internal/wazeroir/format.go index 152cfd4a..789de002 100644 --- a/internal/wazeroir/format.go +++ b/internal/wazeroir/format.go @@ -6,13 +6,14 @@ import ( const EntrypointLabel = ".entrypoint" -func Format(ops []Operation) string { +func Format(ops []UnionOperation) string { buf := bytes.NewBuffer(nil) _, _ = buf.WriteString(EntrypointLabel + "\n") - for _, op := range ops { + for i := range ops { + op := &ops[i] str := op.String() - isLabel := op.Kind() == OperationKindLabel + isLabel := op.Kind == OperationKindLabel if !isLabel { const indent = "\t" str = indent + str diff --git a/internal/wazeroir/operations.go b/internal/wazeroir/operations.go index e568bbf2..3ea0d178 100644 --- a/internal/wazeroir/operations.go +++ b/internal/wazeroir/operations.go @@ -131,14 +131,7 @@ func (s SignedType) String() (ret string) { return } -// Operation is the interface implemented by each individual operation. -type Operation interface { - // Kind returns the OpKind of the implementation. - Kind() OperationKind - fmt.Stringer -} - -// OperationKind is the OpKind of each implementation of Operation interface. +// OperationKind is the Kind of each implementation of Operation interface. type OperationKind uint16 // String implements fmt.Stringer. 
@@ -147,7 +140,7 @@ func (o OperationKind) String() (ret string) { case OperationKindUnreachable: ret = "Unreachable" case OperationKindLabel: - ret = "Label" + ret = "label" case OperationKindBr: ret = "Br" case OperationKindBrIf: @@ -429,409 +422,331 @@ func (o OperationKind) String() (ret string) { } const ( - // OperationKindUnreachable is the OpKind for OperationUnreachable. + // OperationKindUnreachable is the Kind for NewOperationUnreachable. OperationKindUnreachable OperationKind = iota - // OperationKindLabel is the OpKind for OperationLabel. + // OperationKindLabel is the Kind for NewOperationLabel. OperationKindLabel - // OperationKindBr is the OpKind for OperationBr. + // OperationKindBr is the Kind for NewOperationBr. OperationKindBr - // OperationKindBrIf is the OpKind for OperationBrIf. + // OperationKindBrIf is the Kind for NewOperationBrIf. OperationKindBrIf - // OperationKindBrTable is the OpKind for OperationBrTable. + // OperationKindBrTable is the Kind for NewOperationBrTable. OperationKindBrTable - // OperationKindCall is the OpKind for OperationCall. + // OperationKindCall is the Kind for NewOperationCall. OperationKindCall - // OperationKindCallIndirect is the OpKind for OperationCallIndirect. + // OperationKindCallIndirect is the Kind for NewOperationCallIndirect. OperationKindCallIndirect - // OperationKindDrop is the OpKind for OperationDrop. + // OperationKindDrop is the Kind for NewOperationDrop. OperationKindDrop - // OperationKindSelect is the OpKind for OperationSelect. + // OperationKindSelect is the Kind for NewOperationSelect. OperationKindSelect - // OperationKindPick is the OpKind for OperationPick. + // OperationKindPick is the Kind for NewOperationPick. OperationKindPick - // OperationKindSet is the OpKind for OperationSet. + // OperationKindSet is the Kind for NewOperationSet. OperationKindSet - // OperationKindGlobalGet is the OpKind for OperationGlobalGet. + // OperationKindGlobalGet is the Kind for NewOperationGlobalGet. 
OperationKindGlobalGet - // OperationKindGlobalSet is the OpKind for OperationGlobalSet. + // OperationKindGlobalSet is the Kind for NewOperationGlobalSet. OperationKindGlobalSet - // OperationKindLoad is the OpKind for OperationLoad. + // OperationKindLoad is the Kind for NewOperationLoad. OperationKindLoad - // OperationKindLoad8 is the OpKind for OperationLoad8. + // OperationKindLoad8 is the Kind for NewOperationLoad8. OperationKindLoad8 - // OperationKindLoad16 is the OpKind for OperationLoad16. + // OperationKindLoad16 is the Kind for NewOperationLoad16. OperationKindLoad16 - // OperationKindLoad32 is the OpKind for OperationLoad32. + // OperationKindLoad32 is the Kind for NewOperationLoad32. OperationKindLoad32 - // OperationKindStore is the OpKind for OperationStore. + // OperationKindStore is the Kind for NewOperationStore. OperationKindStore - // OperationKindStore8 is the OpKind for OperationStore8. + // OperationKindStore8 is the Kind for NewOperationStore8. OperationKindStore8 - // OperationKindStore16 is the OpKind for OperationStore16. + // OperationKindStore16 is the Kind for NewOperationStore16. OperationKindStore16 - // OperationKindStore32 is the OpKind for OperationStore32. + // OperationKindStore32 is the Kind for NewOperationStore32. OperationKindStore32 - // OperationKindMemorySize is the OpKind for OperationMemorySize. + // OperationKindMemorySize is the Kind for NewOperationMemorySize. OperationKindMemorySize - // OperationKindMemoryGrow is the OpKind for OperationMemoryGrow. + // OperationKindMemoryGrow is the Kind for NewOperationMemoryGrow. OperationKindMemoryGrow - // OperationKindConstI32 is the OpKind for NewOperationConstI32. + // OperationKindConstI32 is the Kind for NewOperationConstI32. OperationKindConstI32 - // OperationKindConstI64 is the OpKind for NewOperationConstI64. + // OperationKindConstI64 is the Kind for NewOperationConstI64. OperationKindConstI64 - // OperationKindConstF32 is the OpKind for NewOperationConstF32. 
+ // OperationKindConstF32 is the Kind for NewOperationConstF32. OperationKindConstF32 - // OperationKindConstF64 is the OpKind for NewOperationConstF64. + // OperationKindConstF64 is the Kind for NewOperationConstF64. OperationKindConstF64 - // OperationKindEq is the OpKind for OperationEq. + // OperationKindEq is the Kind for NewOperationEq. OperationKindEq - // OperationKindNe is the OpKind for OperationNe. + // OperationKindNe is the Kind for NewOperationNe. OperationKindNe - // OperationKindEqz is the OpKind for OperationEqz. + // OperationKindEqz is the Kind for NewOperationEqz. OperationKindEqz - // OperationKindLt is the OpKind for OperationLt. + // OperationKindLt is the Kind for NewOperationLt. OperationKindLt - // OperationKindGt is the OpKind for OperationGt. + // OperationKindGt is the Kind for NewOperationGt. OperationKindGt - // OperationKindLe is the OpKind for OperationLe. + // OperationKindLe is the Kind for NewOperationLe. OperationKindLe - // OperationKindGe is the OpKind for OperationGe. + // OperationKindGe is the Kind for NewOperationGe. OperationKindGe - // OperationKindAdd is the OpKind for OperationAdd. + // OperationKindAdd is the Kind for NewOperationAdd. OperationKindAdd - // OperationKindSub is the OpKind for OperationSub. + // OperationKindSub is the Kind for NewOperationSub. OperationKindSub - // OperationKindMul is the OpKind for OperationMul. + // OperationKindMul is the Kind for NewOperationMul. OperationKindMul - // OperationKindClz is the OpKind for OperationClz. + // OperationKindClz is the Kind for NewOperationClz. OperationKindClz - // OperationKindCtz is the OpKind for OperationCtz. + // OperationKindCtz is the Kind for NewOperationCtz. OperationKindCtz - // OperationKindPopcnt is the OpKind for OperationPopcnt. + // OperationKindPopcnt is the Kind for NewOperationPopcnt. OperationKindPopcnt - // OperationKindDiv is the OpKind for OperationDiv. + // OperationKindDiv is the Kind for NewOperationDiv. 
OperationKindDiv - // OperationKindRem is the OpKind for OperationRem. + // OperationKindRem is the Kind for NewOperationRem. OperationKindRem - // OperationKindAnd is the OpKind for OperationAnd. + // OperationKindAnd is the Kind for NewOperationAnd. OperationKindAnd - // OperationKindOr is the OpKind for OperationOr. + // OperationKindOr is the Kind for NewOperationOr. OperationKindOr - // OperationKindXor is the OpKind for OperationXor. + // OperationKindXor is the Kind for NewOperationXor. OperationKindXor - // OperationKindShl is the OpKind for OperationShl. + // OperationKindShl is the Kind for NewOperationShl. OperationKindShl - // OperationKindShr is the OpKind for OperationShr. + // OperationKindShr is the Kind for NewOperationShr. OperationKindShr - // OperationKindRotl is the OpKind for OperationRotl. + // OperationKindRotl is the Kind for NewOperationRotl. OperationKindRotl - // OperationKindRotr is the OpKind for OperationRotr. + // OperationKindRotr is the Kind for NewOperationRotr. OperationKindRotr - // OperationKindAbs is the OpKind for OperationAbs. + // OperationKindAbs is the Kind for NewOperationAbs. OperationKindAbs - // OperationKindNeg is the OpKind for OperationNeg. + // OperationKindNeg is the Kind for NewOperationNeg. OperationKindNeg - // OperationKindCeil is the OpKind for OperationCeil. + // OperationKindCeil is the Kind for NewOperationCeil. OperationKindCeil - // OperationKindFloor is the OpKind for OperationFloor. + // OperationKindFloor is the Kind for NewOperationFloor. OperationKindFloor - // OperationKindTrunc is the OpKind for OperationTrunc. + // OperationKindTrunc is the Kind for NewOperationTrunc. OperationKindTrunc - // OperationKindNearest is the OpKind for OperationNearest. + // OperationKindNearest is the Kind for NewOperationNearest. OperationKindNearest - // OperationKindSqrt is the OpKind for OperationSqrt. + // OperationKindSqrt is the Kind for NewOperationSqrt. 
OperationKindSqrt - // OperationKindMin is the OpKind for OperationMin. + // OperationKindMin is the Kind for NewOperationMin. OperationKindMin - // OperationKindMax is the OpKind for OperationMax. + // OperationKindMax is the Kind for NewOperationMax. OperationKindMax - // OperationKindCopysign is the OpKind for OperationCopysign. + // OperationKindCopysign is the Kind for NewOperationCopysign. OperationKindCopysign - // OperationKindI32WrapFromI64 is the OpKind for OperationI32WrapFromI64. + // OperationKindI32WrapFromI64 is the Kind for NewOperationI32WrapFromI64. OperationKindI32WrapFromI64 - // OperationKindITruncFromF is the OpKind for OperationITruncFromF. + // OperationKindITruncFromF is the Kind for NewOperationITruncFromF. OperationKindITruncFromF - // OperationKindFConvertFromI is the OpKind for OperationFConvertFromI. + // OperationKindFConvertFromI is the Kind for NewOperationFConvertFromI. OperationKindFConvertFromI - // OperationKindF32DemoteFromF64 is the OpKind for OperationF32DemoteFromF64. + // OperationKindF32DemoteFromF64 is the Kind for NewOperationF32DemoteFromF64. OperationKindF32DemoteFromF64 - // OperationKindF64PromoteFromF32 is the OpKind for OperationF64PromoteFromF32. + // OperationKindF64PromoteFromF32 is the Kind for NewOperationF64PromoteFromF32. OperationKindF64PromoteFromF32 - // OperationKindI32ReinterpretFromF32 is the OpKind for OperationI32ReinterpretFromF32. + // OperationKindI32ReinterpretFromF32 is the Kind for NewOperationI32ReinterpretFromF32. OperationKindI32ReinterpretFromF32 - // OperationKindI64ReinterpretFromF64 is the OpKind for OperationI64ReinterpretFromF64. + // OperationKindI64ReinterpretFromF64 is the Kind for NewOperationI64ReinterpretFromF64. OperationKindI64ReinterpretFromF64 - // OperationKindF32ReinterpretFromI32 is the OpKind for OperationF32ReinterpretFromI32. + // OperationKindF32ReinterpretFromI32 is the Kind for NewOperationF32ReinterpretFromI32. 
OperationKindF32ReinterpretFromI32 - // OperationKindF64ReinterpretFromI64 is the OpKind for OperationF64ReinterpretFromI64. + // OperationKindF64ReinterpretFromI64 is the Kind for NewOperationF64ReinterpretFromI64. OperationKindF64ReinterpretFromI64 - // OperationKindExtend is the OpKind for OperationExtend. + // OperationKindExtend is the Kind for NewOperationExtend. OperationKindExtend - // OperationKindSignExtend32From8 is the OpKind for OperationSignExtend32From8. + // OperationKindSignExtend32From8 is the Kind for NewOperationSignExtend32From8. OperationKindSignExtend32From8 - // OperationKindSignExtend32From16 is the OpKind for OperationSignExtend32From16. + // OperationKindSignExtend32From16 is the Kind for NewOperationSignExtend32From16. OperationKindSignExtend32From16 - // OperationKindSignExtend64From8 is the OpKind for OperationSignExtend64From8. + // OperationKindSignExtend64From8 is the Kind for NewOperationSignExtend64From8. OperationKindSignExtend64From8 - // OperationKindSignExtend64From16 is the OpKind for OperationSignExtend64From16. + // OperationKindSignExtend64From16 is the Kind for NewOperationSignExtend64From16. OperationKindSignExtend64From16 - // OperationKindSignExtend64From32 is the OpKind for OperationSignExtend64From32. + // OperationKindSignExtend64From32 is the Kind for NewOperationSignExtend64From32. OperationKindSignExtend64From32 - // OperationKindMemoryInit is the OpKind for OperationMemoryInit. + // OperationKindMemoryInit is the Kind for NewOperationMemoryInit. OperationKindMemoryInit - // OperationKindDataDrop is the OpKind for OperationDataDrop. + // OperationKindDataDrop is the Kind for NewOperationDataDrop. OperationKindDataDrop - // OperationKindMemoryCopy is the OpKind for OperationMemoryCopy. + // OperationKindMemoryCopy is the Kind for NewOperationMemoryCopy. OperationKindMemoryCopy - // OperationKindMemoryFill is the OpKind for OperationMemoryFill. + // OperationKindMemoryFill is the Kind for NewOperationMemoryFill. 
OperationKindMemoryFill - // OperationKindTableInit is the OpKind for OperationTableInit. + // OperationKindTableInit is the Kind for NewOperationTableInit. OperationKindTableInit - // OperationKindElemDrop is the OpKind for OperationElemDrop. + // OperationKindElemDrop is the Kind for NewOperationElemDrop. OperationKindElemDrop - // OperationKindTableCopy is the OpKind for OperationTableCopy. + // OperationKindTableCopy is the Kind for NewOperationTableCopy. OperationKindTableCopy - // OperationKindRefFunc is the OpKind for OperationRefFunc. + // OperationKindRefFunc is the Kind for NewOperationRefFunc. OperationKindRefFunc - // OperationKindTableGet is the OpKind for OperationTableGet. + // OperationKindTableGet is the Kind for NewOperationTableGet. OperationKindTableGet - // OperationKindTableSet is the OpKind for OperationTableSet. + // OperationKindTableSet is the Kind for NewOperationTableSet. OperationKindTableSet - // OperationKindTableSize is the OpKind for OperationTableSize. + // OperationKindTableSize is the Kind for NewOperationTableSize. OperationKindTableSize - // OperationKindTableGrow is the OpKind for OperationTableGrow. + // OperationKindTableGrow is the Kind for NewOperationTableGrow. OperationKindTableGrow - // OperationKindTableFill is the OpKind for OperationTableFill. + // OperationKindTableFill is the Kind for NewOperationTableFill. OperationKindTableFill // Vector value related instructions are prefixed by V128. - // OperationKindV128Const is the OpKind for OperationV128Const. + // OperationKindV128Const is the Kind for NewOperationV128Const. OperationKindV128Const - // OperationKindV128Add is the OpKind for OperationV128Add. + // OperationKindV128Add is the Kind for NewOperationV128Add. OperationKindV128Add - // OperationKindV128Sub is the OpKind for OperationV128Sub. + // OperationKindV128Sub is the Kind for NewOperationV128Sub. OperationKindV128Sub - // OperationKindV128Load is the OpKind for OperationV128Load. 
+ // OperationKindV128Load is the Kind for NewOperationV128Load. OperationKindV128Load - // OperationKindV128LoadLane is the OpKind for OperationV128LoadLane. + // OperationKindV128LoadLane is the Kind for NewOperationV128LoadLane. OperationKindV128LoadLane - // OperationKindV128Store is the OpKind for OperationV128Store. + // OperationKindV128Store is the Kind for NewOperationV128Store. OperationKindV128Store - // OperationKindV128StoreLane is the OpKind for OperationV128StoreLane. + // OperationKindV128StoreLane is the Kind for NewOperationV128StoreLane. OperationKindV128StoreLane - // OperationKindV128ExtractLane is the OpKind for OperationV128ExtractLane. + // OperationKindV128ExtractLane is the Kind for NewOperationV128ExtractLane. OperationKindV128ExtractLane - // OperationKindV128ReplaceLane is the OpKind for OperationV128ReplaceLane. + // OperationKindV128ReplaceLane is the Kind for NewOperationV128ReplaceLane. OperationKindV128ReplaceLane - // OperationKindV128Splat is the OpKind for OperationV128Splat. + // OperationKindV128Splat is the Kind for NewOperationV128Splat. OperationKindV128Splat - // OperationKindV128Shuffle is the OpKind for OperationV128Shuffle. + // OperationKindV128Shuffle is the Kind for NewOperationV128Shuffle. OperationKindV128Shuffle - // OperationKindV128Swizzle is the OpKind for OperationV128Swizzle. + // OperationKindV128Swizzle is the Kind for NewOperationV128Swizzle. OperationKindV128Swizzle - // OperationKindV128AnyTrue is the OpKind for OperationV128AnyTrue. + // OperationKindV128AnyTrue is the Kind for NewOperationV128AnyTrue. OperationKindV128AnyTrue - // OperationKindV128AllTrue is the OpKind for OperationV128AllTrue. + // OperationKindV128AllTrue is the Kind for NewOperationV128AllTrue. OperationKindV128AllTrue - // OperationKindV128BitMask is the OpKind for OperationV128BitMask. + // OperationKindV128BitMask is the Kind for NewOperationV128BitMask. 
OperationKindV128BitMask - // OperationKindV128And is the OpKind for OperationV128And. + // OperationKindV128And is the Kind for NewOperationV128And. OperationKindV128And - // OperationKindV128Not is the OpKind for OperationV128Not. + // OperationKindV128Not is the Kind for NewOperationV128Not. OperationKindV128Not - // OperationKindV128Or is the OpKind for OperationV128Or. + // OperationKindV128Or is the Kind for NewOperationV128Or. OperationKindV128Or - // OperationKindV128Xor is the OpKind for OperationV128Xor. + // OperationKindV128Xor is the Kind for NewOperationV128Xor. OperationKindV128Xor - // OperationKindV128Bitselect is the OpKind for OperationV128Bitselect. + // OperationKindV128Bitselect is the Kind for NewOperationV128Bitselect. OperationKindV128Bitselect - // OperationKindV128AndNot is the OpKind for OperationV128AndNot. + // OperationKindV128AndNot is the Kind for NewOperationV128AndNot. OperationKindV128AndNot - // OperationKindV128Shl is the OpKind for OperationV128Shl. + // OperationKindV128Shl is the Kind for NewOperationV128Shl. OperationKindV128Shl - // OperationKindV128Shr is the OpKind for OperationV128Shr. + // OperationKindV128Shr is the Kind for NewOperationV128Shr. OperationKindV128Shr - // OperationKindV128Cmp is the OpKind for OperationV128Cmp. + // OperationKindV128Cmp is the Kind for NewOperationV128Cmp. OperationKindV128Cmp - // OperationKindV128AddSat is the OpKind for OperationV128AddSat. + // OperationKindV128AddSat is the Kind for NewOperationV128AddSat. OperationKindV128AddSat - // OperationKindV128SubSat is the OpKind for OperationV128SubSat. + // OperationKindV128SubSat is the Kind for NewOperationV128SubSat. OperationKindV128SubSat - // OperationKindV128Mul is the OpKind for OperationV128Mul. + // OperationKindV128Mul is the Kind for NewOperationV128Mul. OperationKindV128Mul - // OperationKindV128Div is the OpKind for OperationV128Div. + // OperationKindV128Div is the Kind for NewOperationV128Div. 
OperationKindV128Div - // OperationKindV128Neg is the OpKind for OperationV128Neg. + // OperationKindV128Neg is the Kind for NewOperationV128Neg. OperationKindV128Neg - // OperationKindV128Sqrt is the OpKind for OperationV128Sqrt. + // OperationKindV128Sqrt is the Kind for NewOperationV128Sqrt. OperationKindV128Sqrt - // OperationKindV128Abs is the OpKind for OperationV128Abs. + // OperationKindV128Abs is the Kind for NewOperationV128Abs. OperationKindV128Abs - // OperationKindV128Popcnt is the OpKind for OperationV128Popcnt. + // OperationKindV128Popcnt is the Kind for NewOperationV128Popcnt. OperationKindV128Popcnt - // OperationKindV128Min is the OpKind for OperationV128Min. + // OperationKindV128Min is the Kind for NewOperationV128Min. OperationKindV128Min - // OperationKindV128Max is the OpKind for OperationV128Max. + // OperationKindV128Max is the Kind for NewOperationV128Max. OperationKindV128Max - // OperationKindV128AvgrU is the OpKind for OperationV128AvgrU. + // OperationKindV128AvgrU is the Kind for NewOperationV128AvgrU. OperationKindV128AvgrU - // OperationKindV128Pmin is the OpKind for OperationV128Pmin. + // OperationKindV128Pmin is the Kind for NewOperationV128Pmin. OperationKindV128Pmin - // OperationKindV128Pmax is the OpKind for OperationV128Pmax. + // OperationKindV128Pmax is the Kind for NewOperationV128Pmax. OperationKindV128Pmax - // OperationKindV128Ceil is the OpKind for OperationV128Ceil. + // OperationKindV128Ceil is the Kind for NewOperationV128Ceil. OperationKindV128Ceil - // OperationKindV128Floor is the OpKind for OperationV128Floor. + // OperationKindV128Floor is the Kind for NewOperationV128Floor. OperationKindV128Floor - // OperationKindV128Trunc is the OpKind for OperationV128Trunc. + // OperationKindV128Trunc is the Kind for NewOperationV128Trunc. OperationKindV128Trunc - // OperationKindV128Nearest is the OpKind for OperationV128Nearest. + // OperationKindV128Nearest is the Kind for NewOperationV128Nearest. 
OperationKindV128Nearest - // OperationKindV128Extend is the OpKind for OperationV128Extend. + // OperationKindV128Extend is the Kind for NewOperationV128Extend. OperationKindV128Extend - // OperationKindV128ExtMul is the OpKind for OperationV128ExtMul. + // OperationKindV128ExtMul is the Kind for NewOperationV128ExtMul. OperationKindV128ExtMul - // OperationKindV128Q15mulrSatS is the OpKind for OperationV128Q15mulrSatS. + // OperationKindV128Q15mulrSatS is the Kind for NewOperationV128Q15mulrSatS. OperationKindV128Q15mulrSatS - // OperationKindV128ExtAddPairwise is the OpKind for OperationV128ExtAddPairwise. + // OperationKindV128ExtAddPairwise is the Kind for NewOperationV128ExtAddPairwise. OperationKindV128ExtAddPairwise - // OperationKindV128FloatPromote is the OpKind for OperationV128FloatPromote. + // OperationKindV128FloatPromote is the Kind for NewOperationV128FloatPromote. OperationKindV128FloatPromote - // OperationKindV128FloatDemote is the OpKind for OperationV128FloatDemote. + // OperationKindV128FloatDemote is the Kind for NewOperationV128FloatDemote. OperationKindV128FloatDemote - // OperationKindV128FConvertFromI is the OpKind for OperationV128FConvertFromI. + // OperationKindV128FConvertFromI is the Kind for NewOperationV128FConvertFromI. OperationKindV128FConvertFromI - // OperationKindV128Dot is the OpKind for OperationV128Dot. + // OperationKindV128Dot is the Kind for NewOperationV128Dot. OperationKindV128Dot - // OperationKindV128Narrow is the OpKind for OperationV128Narrow. + // OperationKindV128Narrow is the Kind for NewOperationV128Narrow. OperationKindV128Narrow - // OperationKindV128ITruncSatFromF is the OpKind for OperationV128ITruncSatFromF. + // OperationKindV128ITruncSatFromF is the Kind for NewOperationV128ITruncSatFromF. OperationKindV128ITruncSatFromF - // OperationKindBuiltinFunctionCheckExitCode is the OpKind for OperationBuiltinFunctionCheckExitCode. 
+ // OperationKindBuiltinFunctionCheckExitCode is the Kind for NewOperationBuiltinFunctionCheckExitCode. OperationKindBuiltinFunctionCheckExitCode // operationKindEnd is always placed at the bottom of this iota definition to be used in the test. operationKindEnd ) -var ( - _ Operation = OperationLabel{} - _ Operation = OperationBr{} - _ Operation = OperationBrIf{} - _ Operation = OperationBrTable{} - _ Operation = OperationDrop{} - _ Operation = OperationITruncFromF{} - _ Operation = OperationFConvertFromI{} - _ Operation = OperationExtend{} - _ Operation = OperationMemoryInit{} - _ Operation = OperationDataDrop{} - _ Operation = OperationTableInit{} - _ Operation = OperationElemDrop{} - _ Operation = OperationTableCopy{} - _ Operation = OperationRefFunc{} - _ Operation = OperationTableGet{} - _ Operation = OperationTableSet{} - _ Operation = OperationTableSize{} - _ Operation = OperationTableGrow{} - _ Operation = OperationTableFill{} - _ Operation = OperationV128Const{} - _ Operation = OperationV128Add{} - _ Operation = OperationV128Sub{} - _ Operation = OperationV128Load{} - _ Operation = OperationV128LoadLane{} - _ Operation = OperationV128Store{} - _ Operation = OperationV128StoreLane{} - _ Operation = OperationV128ExtractLane{} - _ Operation = OperationV128ReplaceLane{} - _ Operation = OperationV128Splat{} - _ Operation = OperationV128Shuffle{} - _ Operation = OperationV128Swizzle{} - _ Operation = OperationV128AnyTrue{} - _ Operation = OperationV128AllTrue{} - _ Operation = OperationV128BitMask{} - _ Operation = OperationV128And{} - _ Operation = OperationV128Not{} - _ Operation = OperationV128Or{} - _ Operation = OperationV128Xor{} - _ Operation = OperationV128Bitselect{} - _ Operation = OperationV128AndNot{} - _ Operation = OperationV128Shl{} - _ Operation = OperationV128Shr{} - _ Operation = OperationV128Cmp{} - _ Operation = OperationV128AddSat{} - _ Operation = OperationV128SubSat{} - _ Operation = OperationV128Mul{} - _ Operation = OperationV128Div{} - 
_ Operation = OperationV128Neg{} - _ Operation = OperationV128Sqrt{} - _ Operation = OperationV128Abs{} - _ Operation = OperationV128Popcnt{} - _ Operation = OperationV128Min{} - _ Operation = OperationV128Max{} - _ Operation = OperationV128AvgrU{} - _ Operation = OperationV128Pmin{} - _ Operation = OperationV128Pmax{} - _ Operation = OperationV128Ceil{} - _ Operation = OperationV128Floor{} - _ Operation = OperationV128Trunc{} - _ Operation = OperationV128Nearest{} - _ Operation = OperationV128Extend{} - _ Operation = OperationV128ExtMul{} - _ Operation = OperationV128Q15mulrSatS{} - _ Operation = OperationV128ExtAddPairwise{} - _ Operation = OperationV128FloatPromote{} - _ Operation = OperationV128FloatDemote{} - _ Operation = OperationV128FConvertFromI{} - _ Operation = OperationV128Dot{} - _ Operation = OperationV128Narrow{} - _ Operation = OperationV128ITruncSatFromF{} -) - // NewOperationBuiltinFunctionCheckExitCode is a constructor for UnionOperation with Kind OperationKindBuiltinFunctionCheckExitCode. // // OperationBuiltinFunctionCheckExitCode corresponds to the instruction to check the api.Module is already closed due to // context.DeadlineExceeded, context.Canceled, or the explicit call of CloseWithExitCode on api.Module. func NewOperationBuiltinFunctionCheckExitCode() UnionOperation { - return UnionOperation{OpKind: OperationKindBuiltinFunctionCheckExitCode} + return UnionOperation{Kind: OperationKindBuiltinFunctionCheckExitCode} } -// Label is the label of each block in wazeroir where "block" consists of multiple operations, -// and must end with branching operations (e.g. OperationBr or OperationBrIf). -type Label struct { - FrameID uint32 - Kind LabelKind -} +// Label is the unique identifier for each block in a single function in wazeroir +// where "block" consists of multiple operations, and must end with branching operations +// (e.g. OperationKindBr or OperationKindBrIf). 
+type Label uint64 -// LabelID is the unique identifiers for blocks in a single function. -type LabelID uint64 - -// Kind returns the LabelKind encoded in this LabelID. -func (l LabelID) Kind() LabelKind { +// Kind returns the LabelKind encoded in this Label. +func (l Label) Kind() LabelKind { return LabelKind(uint32(l)) } -// FrameID returns the frame id encoded in this LabelID. -func (l LabelID) FrameID() int { +// FrameID returns the frame id encoded in this Label. +func (l Label) FrameID() int { return int(uint32(l >> 32)) } -// ID returns the LabelID for this Label. -func (l Label) ID() (id LabelID) { - id = LabelID(l.Kind) | LabelID(l.FrameID)<<32 - return +// NewLabel is a constructor for a Label. +func NewLabel(kind LabelKind, frameID uint32) Label { + return Label(kind) | Label(frameID)<<32 } // String implements fmt.Stringer. func (l Label) String() (ret string) { - switch l.Kind { + frameID := l.FrameID() + switch l.Kind() { case LabelKindHeader: - ret = fmt.Sprintf(".L%d", l.FrameID) + ret = fmt.Sprintf(".L%d", frameID) case LabelKindElse: - ret = fmt.Sprintf(".L%d_else", l.FrameID) + ret = fmt.Sprintf(".L%d_else", frameID) case LabelKindContinuation: - ret = fmt.Sprintf(".L%d_cont", l.FrameID) + ret = fmt.Sprintf(".L%d_cont", frameID) case LabelKindReturn: return ".return" } @@ -839,19 +754,19 @@ func (l Label) String() (ret string) { } func (l Label) IsReturnTarget() bool { - return l.Kind == LabelKindReturn + return l.Kind() == LabelKindReturn } -// LabelKind is the OpKind of the label. +// LabelKind is the Kind of the label. type LabelKind = byte const ( // LabelKindHeader is the header for various blocks. For example, the "then" block of - // wasm.OpcodeIfName in Wasm has the label of this OpKind. + // wasm.OpcodeIfName in Wasm has the label of this Kind. LabelKindHeader LabelKind = iota - // LabelKindElse is the OpKind of label for "else" block of wasm.OpcodeIfName in Wasm. 
+ // LabelKindElse is the Kind of label for "else" block of wasm.OpcodeIfName in Wasm. LabelKindElse - // LabelKindContinuation is the OpKind of label which is the continuation of blocks. + // LabelKindContinuation is the Kind of label which is the continuation of blocks. // For example, for wasm text like // (func // .... @@ -888,13 +803,13 @@ func (b BranchTargetDrop) String() (ret string) { // UnionOperation implements Operation and is the compilation (engine.lowerIR) result of a wazeroir.Operation. // // Not all operations result in a UnionOperation, e.g. wazeroir.OperationI32ReinterpretFromF32, and some operations are -// more complex than others, e.g. wazeroir.OperationBrTable. +// more complex than others, e.g. wazeroir.NewOperationBrTable. // // Note: This is a form of union type as it can store fields needed for any operation. Hence, most fields are opaque and // only relevant when in context of its kind. type UnionOperation struct { - // OpKind determines how to interpret the other fields in this struct. - OpKind OperationKind + // Kind determines how to interpret the other fields in this struct. + Kind OperationKind B1, B2 byte B3 bool U1, U2 uint64 @@ -905,7 +820,7 @@ type UnionOperation struct { // String implements fmt.Stringer. 
func (o UnionOperation) String() string { - switch o.OpKind { + switch o.Kind { case OperationKindUnreachable, OperationKindSelect, OperationKindMemorySize, @@ -922,33 +837,80 @@ func (o UnionOperation) String() string { OperationKindSignExtend64From8, OperationKindSignExtend64From16, OperationKindSignExtend64From32, + OperationKindMemoryInit, + OperationKindDataDrop, OperationKindMemoryCopy, OperationKindMemoryFill, + OperationKindTableInit, + OperationKindElemDrop, + OperationKindTableCopy, + OperationKindRefFunc, + OperationKindTableGet, + OperationKindTableSet, + OperationKindTableSize, + OperationKindTableGrow, + OperationKindTableFill, OperationKindBuiltinFunctionCheckExitCode: - return o.Kind().String() + return o.Kind.String() case OperationKindCall, OperationKindGlobalGet, OperationKindGlobalSet: - return fmt.Sprintf("%s %d", o.Kind(), o.B1) + return fmt.Sprintf("%s %d", o.Kind, o.B1) + + case OperationKindLabel: + return Label(o.U1).String() + + case OperationKindBr: + return fmt.Sprintf("%s %s", o.Kind, Label(o.U1).String()) + + case OperationKindBrIf: + var thenTarget Label + var elseTarget Label + if len(o.Us) > 0 { + thenTarget = Label(o.Us[0]) + elseTarget = Label(o.Us[1]) + } + return fmt.Sprintf("%s %s, %s", o.Kind, thenTarget, elseTarget) + + case OperationKindBrTable: + var targets []string + var defaultLabel Label + if len(o.Us) > 0 { + targets = make([]string, len(o.Us)-1) + for i, t := range o.Us[1:] { + targets[i] = Label(t).String() + } + defaultLabel = Label(o.Us[0]) + } + return fmt.Sprintf("%s [%s] %s", o.Kind, strings.Join(targets, ","), defaultLabel) case OperationKindCallIndirect: - return fmt.Sprintf("%s: type=%d, table=%d", o.Kind(), o.U1, o.U2) + return fmt.Sprintf("%s: type=%d, table=%d", o.Kind, o.U1, o.U2) + + case OperationKindDrop: + start := -1 + end := -1 + if len(o.Rs) > 0 { + start = o.Rs[0].Start + end = o.Rs[0].End + } + return fmt.Sprintf("%s %d..%d", o.Kind, start, end) case OperationKindPick, OperationKindSet: - return 
fmt.Sprintf("%s %d (is_vector=%v)", o.Kind(), o.U1, o.B3) + return fmt.Sprintf("%s %d (is_vector=%v)", o.Kind, o.U1, o.B3) case OperationKindLoad, OperationKindStore: - return fmt.Sprintf("%s.%s (align=%d, offset=%d)", UnsignedType(o.B1), o.Kind(), o.U1, o.U2) + return fmt.Sprintf("%s.%s (align=%d, offset=%d)", UnsignedType(o.B1), o.Kind, o.U1, o.U2) case OperationKindLoad8, OperationKindLoad16: - return fmt.Sprintf("%s.%s (align=%d, offset=%d)", SignedType(o.B1), o.Kind(), o.U1, o.U2) + return fmt.Sprintf("%s.%s (align=%d, offset=%d)", SignedType(o.B1), o.Kind, o.U1, o.U2) case OperationKindStore8, OperationKindStore16, OperationKindStore32: - return fmt.Sprintf("%s (align=%d, offset=%d)", o.Kind(), o.U1, o.U2) + return fmt.Sprintf("%s (align=%d, offset=%d)", o.Kind, o.U1, o.U2) case OperationKindLoad32: var t string @@ -957,14 +919,14 @@ func (o UnionOperation) String() string { } else { t = "u64" } - return fmt.Sprintf("%s.%s (align=%d, offset=%d)", t, o.Kind(), o.U1, o.U2) + return fmt.Sprintf("%s.%s (align=%d, offset=%d)", t, o.Kind, o.U1, o.U2) case OperationKindEq, OperationKindNe, OperationKindAdd, OperationKindSub, OperationKindMul: - return fmt.Sprintf("%s.%s", UnsignedType(o.B1), o.Kind()) + return fmt.Sprintf("%s.%s", UnsignedType(o.B1), o.Kind) case OperationKindEqz, OperationKindClz, @@ -976,17 +938,17 @@ func (o UnionOperation) String() string { OperationKindShl, OperationKindRotl, OperationKindRotr: - return fmt.Sprintf("%s.%s", UnsignedInt(o.B1), o.Kind()) + return fmt.Sprintf("%s.%s", UnsignedInt(o.B1), o.Kind) case OperationKindRem, OperationKindShr: - return fmt.Sprintf("%s.%s", SignedInt(o.B1), o.Kind()) + return fmt.Sprintf("%s.%s", SignedInt(o.B1), o.Kind) case OperationKindLt, OperationKindGt, OperationKindLe, OperationKindGe, OperationKindDiv: - return fmt.Sprintf("%s.%s", SignedType(o.B1), o.Kind()) + return fmt.Sprintf("%s.%s", SignedType(o.B1), o.Kind) case OperationKindAbs, OperationKindNeg, @@ -998,119 +960,161 @@ func (o 
UnionOperation) String() string { OperationKindMin, OperationKindMax, OperationKindCopysign: - return fmt.Sprintf("%s.%s", Float(o.B1), o.Kind()) + return fmt.Sprintf("%s.%s", Float(o.B1), o.Kind) case OperationKindConstI32, OperationKindConstI64: - return fmt.Sprintf("%s %#x", o.Kind(), o.U1) + return fmt.Sprintf("%s %#x", o.Kind, o.U1) case OperationKindConstF32: - return fmt.Sprintf("%s %f", o.Kind(), math.Float32frombits(uint32(o.U1))) + return fmt.Sprintf("%s %f", o.Kind, math.Float32frombits(uint32(o.U1))) case OperationKindConstF64: - return fmt.Sprintf("%s %f", o.Kind(), math.Float64frombits(o.U1)) + return fmt.Sprintf("%s %f", o.Kind, math.Float64frombits(o.U1)) + + case OperationKindITruncFromF: + return fmt.Sprintf("%s.%s.%s (non_trapping=%v)", SignedInt(o.B2), o.Kind, Float(o.B1), o.B3) + case OperationKindFConvertFromI: + return fmt.Sprintf("%s.%s.%s", Float(o.B2), o.Kind, SignedInt(o.B1)) + case OperationKindExtend: + var in, out string + if o.B3 { + in = "i32" + out = "i64" + } else { + in = "u32" + out = "u64" + } + return fmt.Sprintf("%s.%s.%s", out, o.Kind, in) + + case OperationKindV128Const: + return fmt.Sprintf("%s [%#x, %#x]", o.Kind, o.U1, o.U2) + case OperationKindV128Add, + OperationKindV128Sub: + return fmt.Sprintf("%s (shape=%s)", o.Kind, shapeName(o.B1)) + case OperationKindV128Load, + OperationKindV128LoadLane, + OperationKindV128Store, + OperationKindV128StoreLane, + OperationKindV128ExtractLane, + OperationKindV128ReplaceLane, + OperationKindV128Splat, + OperationKindV128Shuffle, + OperationKindV128Swizzle, + OperationKindV128AnyTrue, + OperationKindV128AllTrue, + OperationKindV128BitMask, + OperationKindV128And, + OperationKindV128Not, + OperationKindV128Or, + OperationKindV128Xor, + OperationKindV128Bitselect, + OperationKindV128AndNot, + OperationKindV128Shl, + OperationKindV128Shr, + OperationKindV128Cmp, + OperationKindV128AddSat, + OperationKindV128SubSat, + OperationKindV128Mul, + OperationKindV128Div, + OperationKindV128Neg, + 
OperationKindV128Sqrt, + OperationKindV128Abs, + OperationKindV128Popcnt, + OperationKindV128Min, + OperationKindV128Max, + OperationKindV128AvgrU, + OperationKindV128Pmin, + OperationKindV128Pmax, + OperationKindV128Ceil, + OperationKindV128Floor, + OperationKindV128Trunc, + OperationKindV128Nearest, + OperationKindV128Extend, + OperationKindV128ExtMul, + OperationKindV128Q15mulrSatS, + OperationKindV128ExtAddPairwise, + OperationKindV128FloatPromote, + OperationKindV128FloatDemote, + OperationKindV128FConvertFromI, + OperationKindV128Dot, + OperationKindV128Narrow: + return o.Kind.String() + + case OperationKindV128ITruncSatFromF: + if o.B3 { + return fmt.Sprintf("%s.%sS", o.Kind, shapeName(o.B1)) + } else { + return fmt.Sprintf("%s.%sU", o.Kind, shapeName(o.B1)) + } + default: - panic(fmt.Sprintf("TODO: %v", o.OpKind)) + panic(fmt.Sprintf("TODO: %v", o.Kind)) } } -// Kind implements Operation.Kind -func (o UnionOperation) Kind() OperationKind { - return o.OpKind -} - -// NewOperationUnreachable is a constructor for UnionOperation with Kind OperationKindUnreachable +// NewOperationUnreachable is a constructor for UnionOperation with OperationKindUnreachable // // This corresponds to wasm.OpcodeUnreachable. // // The engines are expected to exit the execution with wasmruntime.ErrRuntimeUnreachable error. func NewOperationUnreachable() UnionOperation { - return UnionOperation{OpKind: OperationKindUnreachable} + return UnionOperation{Kind: OperationKindUnreachable} } -// OperationLabel implements Operation. +// NewOperationLabel is a constructor for UnionOperation with OperationKindLabel. // // This is used to inform the engines of the beginning of a label. -type OperationLabel struct { - Label Label +func NewOperationLabel(label Label) UnionOperation { + return UnionOperation{Kind: OperationKindLabel, U1: uint64(label)} } -// String implements fmt.Stringer. 
-func (o OperationLabel) String() string { return o.Label.String() } - -// Kind implements Operation.Kind -func (OperationLabel) Kind() OperationKind { - return OperationKindLabel -} - -// OperationBr implements Operation. +// NewOperationBr is a constructor for UnionOperation with OperationKindBr. // -// The engines are expected to branch into OperationBr.Target label. -type OperationBr struct { - Target Label +// The engines are expected to branch into U1 label. +func NewOperationBr(target Label) UnionOperation { + return UnionOperation{Kind: OperationKindBr, U1: uint64(target)} } -// String implements fmt.Stringer. -func (o OperationBr) String() string { return fmt.Sprintf("%s %s", o.Kind(), o.Target.String()) } - -// Kind implements Operation.Kind -func (OperationBr) Kind() OperationKind { - return OperationKindBr -} - -// OperationBrIf implements Operation. +// NewOperationBrIf is a constructor for UnionOperation with OperationKindBrIf. // -// The engines are expected to pop a value and branch into OperationBrIf.Then label if the value equals 1. -// Otherwise, the code branches into OperationBrIf.Else label. -type OperationBrIf struct { - Then, Else BranchTargetDrop +// The engines are expected to pop a value and branch into Us[0] label if the value equals 1. +// Otherwise, the code branches into Us[1] label. +func NewOperationBrIf(thenTarget, elseTarget BranchTargetDrop) UnionOperation { + return UnionOperation{ + Kind: OperationKindBrIf, + Us: []uint64{uint64(thenTarget.Target), uint64(elseTarget.Target)}, + Rs: []*InclusiveRange{thenTarget.ToDrop, elseTarget.ToDrop}, + } } -// String implements fmt.Stringer. -func (o OperationBrIf) String() string { return fmt.Sprintf("%s %s, %s", o.Kind(), o.Then, o.Else) } - -// Kind implements Operation.Kind -func (OperationBrIf) Kind() OperationKind { - return OperationKindBrIf -} - -// OperationBrTable implements Operation. +// NewOperationBrTable is a constructor for UnionOperation with OperationKindBrTable. 
// // This corresponds to wasm.OpcodeBrTableName except that the label // here means the wazeroir level, not the ones of Wasm. // -// The engines are expected to do the br_table operation base on the -// OperationBrTable.Default and OperationBrTable.Targets. More precisely, -// this pops a value from the stack (called "index") and decide which branch we go into next -// based on the value. +// The engines are expected to do the br_table operation base on the default (Us[0], Rs[0]) and +// targets (Us[1:], Rs[1:]). More precisely, this pops a value from the stack (called "index") +// and decides which branch we go into next based on the value. // // For example, assume we have operations like {default: L_DEFAULT, targets: [L0, L1, L2]}. // If "index" >= len(defaults), then branch into the L_DEFAULT label. // Otherwise, we enter label of targets[index]. -type OperationBrTable struct { - Targets []*BranchTargetDrop - Default *BranchTargetDrop -} - -// String implements fmt.Stringer. -func (o OperationBrTable) String() string { - targets := make([]string, len(o.Targets)) - for i, t := range o.Targets { - targets[i] = t.String() +// +// targetRanges must be the same length of targetLabels, padded with `nil`s if necessary +func NewOperationBrTable(targetLabels []uint64, targetRanges []*InclusiveRange) UnionOperation { + return UnionOperation{ + Kind: OperationKindBrTable, + Us: targetLabels, + Rs: targetRanges, } - return fmt.Sprintf("%s [%s] %s", o.Kind(), strings.Join(targets, ","), o.Default) } -// Kind implements Operation.Kind -func (OperationBrTable) Kind() OperationKind { - return OperationKindBrTable -} - -// NewOperationCall is a constructor for UnionOperation with Kind OperationKindCall. +// NewOperationCall is a constructor for UnionOperation with OperationKindCall. // // This corresponds to wasm.OpcodeCallName, and engines are expected to // enter into a function whose index equals OperationCall.FunctionIndex. 
func NewOperationCall(functionIndex uint32) UnionOperation { - return UnionOperation{OpKind: OperationKindCall, U1: uint64(functionIndex)} + return UnionOperation{Kind: OperationKindCall, U1: uint64(functionIndex)} } // NewOperationCallIndirect implements Operation. @@ -1126,7 +1130,7 @@ func NewOperationCall(functionIndex uint32) UnionOperation { // 1) whether "offset" exceeds the length of table Tables[OperationCallIndirect.TableIndex]. // 2) whether the type of the function table[offset] matches the function type specified by OperationCallIndirect.TypeIndex. func NewOperationCallIndirect(typeIndex, tableIndex uint32) UnionOperation { - return UnionOperation{OpKind: OperationKindCallIndirect, U1: uint64(typeIndex), U2: uint64(tableIndex)} + return UnionOperation{Kind: OperationKindCallIndirect, U1: uint64(typeIndex), U2: uint64(tableIndex)} } // InclusiveRange is the range which spans across the value stack starting from the top to the bottom, and @@ -1135,26 +1139,17 @@ type InclusiveRange struct { Start, End int } -// OperationDrop implements Operation. +// NewOperationDrop is a constructor for UnionOperation with OperationKindDrop. // -// The engines are expected to discard the values selected by OperationDrop.Depth which +// The engines are expected to discard the values selected by NewOperationDrop.Depth which // starts from the top of the stack to the bottom. -type OperationDrop struct { - // Depths spans across the uint64 value stack at runtime to be dropped by this operation. - Depth *InclusiveRange +// +// depth spans across the uint64 value stack at runtime to be dropped by this operation. +func NewOperationDrop(depth *InclusiveRange) UnionOperation { + return UnionOperation{Kind: OperationKindDrop, Rs: []*InclusiveRange{depth}} } -// String implements fmt.Stringer. 
-func (o OperationDrop) String() string { - return fmt.Sprintf("%s %d..%d", o.Kind(), o.Depth.Start, o.Depth.End) -} - -// Kind implements Operation.Kind -func (OperationDrop) Kind() OperationKind { - return OperationKindDrop -} - -// NewOperationSelect is a constructor for UnionOperation with Kind OperationKindSelect. +// NewOperationSelect is a constructor for UnionOperation with OperationKindSelect. // // This corresponds to wasm.OpcodeSelect. // @@ -1163,10 +1158,10 @@ func (OperationDrop) Kind() OperationKind { // // isTargetVector true if the selection target value's type is wasm.ValueTypeV128. func NewOperationSelect(isTargetVector bool) UnionOperation { - return UnionOperation{OpKind: OperationKindSelect, B3: isTargetVector} + return UnionOperation{Kind: OperationKindSelect, B3: isTargetVector} } -// NewOperationPick is a constructor for UnionOperation with Kind OperationKindPick. +// NewOperationPick is a constructor for UnionOperation with OperationKindPick. // // The engines are expected to copy a value pointed by depth, and push the // copied value onto the top of the stack. @@ -1174,10 +1169,10 @@ func NewOperationSelect(isTargetVector bool) UnionOperation { // depth is the location of the pick target in the uint64 value stack at runtime. // If isTargetVector=true, this points to the location of the lower 64-bits of the vector. func NewOperationPick(depth int, isTargetVector bool) UnionOperation { - return UnionOperation{OpKind: OperationKindPick, U1: uint64(depth), B3: isTargetVector} + return UnionOperation{Kind: OperationKindPick, U1: uint64(depth), B3: isTargetVector} } -// NewOperationSet is a constructor for UnionOperation with Kind OperationKindSet. +// NewOperationSet is a constructor for UnionOperation with OperationKindSet. // // The engines are expected to set the top value of the stack to the location specified by // depth. 
@@ -1185,27 +1180,27 @@ func NewOperationPick(depth int, isTargetVector bool) UnionOperation { // depth is the location of the set target in the uint64 value stack at runtime. // If isTargetVector=true, this points the location of the lower 64-bits of the vector. func NewOperationSet(depth int, isTargetVector bool) UnionOperation { - return UnionOperation{OpKind: OperationKindSet, U1: uint64(depth), B3: isTargetVector} + return UnionOperation{Kind: OperationKindSet, U1: uint64(depth), B3: isTargetVector} } -// NewOperationGlobalGet is a constructor for UnionOperation with Kind OperationKindGlobalGet. +// NewOperationGlobalGet is a constructor for UnionOperation with OperationKindGlobalGet. // // The engines are expected to read the global value specified by OperationGlobalGet.Index, // and push the copy of the value onto the stack. // // See wasm.OpcodeGlobalGet. func NewOperationGlobalGet(index uint32) UnionOperation { - return UnionOperation{OpKind: OperationKindGlobalGet, U1: uint64(index)} + return UnionOperation{Kind: OperationKindGlobalGet, U1: uint64(index)} } -// NewOperationGlobalSet is a constructor for UnionOperation with Kind OperationKindGlobalSet. +// NewOperationGlobalSet is a constructor for UnionOperation with OperationKindGlobalSet. // // The engines are expected to consume the value from the top of the stack, // and write the value into the global specified by OperationGlobalSet.Index. // // See wasm.OpcodeGlobalSet. func NewOperationGlobalSet(index uint32) UnionOperation { - return UnionOperation{OpKind: OperationKindGlobalSet, U1: uint64(index)} + return UnionOperation{Kind: OperationKindGlobalSet, U1: uint64(index)} } // MemoryArg is the "memarg" to all memory instructions. @@ -1223,37 +1218,37 @@ type MemoryArg struct { Offset uint32 } -// NewOperationLoad is a constructor for UnionOperation with Kind OperationKindLoad. +// NewOperationLoad is a constructor for UnionOperation with OperationKindLoad. 
// // This corresponds to wasm.OpcodeI32LoadName wasm.OpcodeI64LoadName wasm.OpcodeF32LoadName and wasm.OpcodeF64LoadName. // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationLoad(unsignedType UnsignedType, arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindLoad, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindLoad, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationLoad8 is a constructor for UnionOperation with Kind OperationKindLoad8. +// NewOperationLoad8 is a constructor for UnionOperation with OperationKindLoad8. // // This corresponds to wasm.OpcodeI32Load8SName wasm.OpcodeI32Load8UName wasm.OpcodeI64Load8SName wasm.OpcodeI64Load8UName. // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationLoad8(signedInt SignedInt, arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindLoad8, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindLoad8, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationLoad16 is a constructor for UnionOperation with Kind OperationKindLoad16. +// NewOperationLoad16 is a constructor for UnionOperation with OperationKindLoad16. // // This corresponds to wasm.OpcodeI32Load16SName wasm.OpcodeI32Load16UName wasm.OpcodeI64Load16SName wasm.OpcodeI64Load16UName. 
// // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationLoad16(signedInt SignedInt, arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindLoad16, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindLoad16, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationLoad32 is a constructor for UnionOperation with Kind OperationKindLoad32. +// NewOperationLoad32 is a constructor for UnionOperation with OperationKindLoad32. // // This corresponds to wasm.OpcodeI64Load32SName wasm.OpcodeI64Load32UName. // @@ -1264,59 +1259,59 @@ func NewOperationLoad32(signed bool, arg MemoryArg) UnionOperation { if signed { sigB = 1 } - return UnionOperation{OpKind: OperationKindLoad32, B1: sigB, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindLoad32, B1: sigB, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationStore is a constructor for UnionOperation with Kind OperationKindStore. +// NewOperationStore is a constructor for UnionOperation with OperationKindStore. // // # This corresponds to wasm.OpcodeI32StoreName wasm.OpcodeI64StoreName wasm.OpcodeF32StoreName wasm.OpcodeF64StoreName // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise store the corresponding value following the semantics of the corresponding WebAssembly instruction. 
func NewOperationStore(unsignedType UnsignedType, arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindStore, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindStore, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationStore8 is a constructor for UnionOperation with Kind OperationKindStore8. +// NewOperationStore8 is a constructor for UnionOperation with OperationKindStore8. // // # This corresponds to wasm.OpcodeI32Store8Name wasm.OpcodeI64Store8Name // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise store the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationStore8(arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindStore8, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindStore8, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationStore16 is a constructor for UnionOperation with Kind OperationKindStore16. +// NewOperationStore16 is a constructor for UnionOperation with OperationKindStore16. // // # This corresponds to wasm.OpcodeI32Store16Name wasm.OpcodeI64Store16Name // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise store the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationStore16(arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindStore16, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindStore16, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationStore32 is a constructor for UnionOperation with Kind OperationKindStore32. 
+// NewOperationStore32 is a constructor for UnionOperation with OperationKindStore32. // // # This corresponds to wasm.OpcodeI64Store32Name // // The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary, // otherwise store the corresponding value following the semantics of the corresponding WebAssembly instruction. func NewOperationStore32(arg MemoryArg) UnionOperation { - return UnionOperation{OpKind: OperationKindStore32, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} + return UnionOperation{Kind: OperationKindStore32, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// NewOperationMemorySize is a constructor for UnionOperation with Kind OperationKindMemorySize. +// NewOperationMemorySize is a constructor for UnionOperation with OperationKindMemorySize. // // This corresponds to wasm.OpcodeMemorySize. // // The engines are expected to push the current page size of the memory onto the stack. func NewOperationMemorySize() UnionOperation { - return UnionOperation{OpKind: OperationKindMemorySize} + return UnionOperation{Kind: OperationKindMemorySize} } -// NewOperationMemoryGrow is a constructor for UnionOperation with Kind OperationKindMemoryGrow. +// NewOperationMemoryGrow is a constructor for UnionOperation with OperationKindMemoryGrow. // // This corresponds to wasm.OpcodeMemoryGrow. // @@ -1324,110 +1319,110 @@ func NewOperationMemorySize() UnionOperation { // execute wasm.MemoryInstance Grow with the value, and push the previous // page size of the memory onto the stack. func NewOperationMemoryGrow() UnionOperation { - return UnionOperation{OpKind: OperationKindMemoryGrow} + return UnionOperation{Kind: OperationKindMemoryGrow} } -// NewOperationConstI32 is a constructor for UnionOperation with Kind OperationConstI32. +// NewOperationConstI32 is a constructor for UnionOperation with OperationConstI32. // // This corresponds to wasm.OpcodeI32Const. 
func NewOperationConstI32(value uint32) UnionOperation { - return UnionOperation{OpKind: OperationKindConstI32, U1: uint64(value)} + return UnionOperation{Kind: OperationKindConstI32, U1: uint64(value)} } -// NewOperationConstI64 is a constructor for UnionOperation with Kind OperationConstI64. +// NewOperationConstI64 is a constructor for UnionOperation with OperationConstI64. // // This corresponds to wasm.OpcodeI64Const. func NewOperationConstI64(value uint64) UnionOperation { - return UnionOperation{OpKind: OperationKindConstI64, U1: value} + return UnionOperation{Kind: OperationKindConstI64, U1: value} } -// NewOperationConstF32 is a constructor for UnionOperation with Kind OperationConstF32. +// NewOperationConstF32 is a constructor for UnionOperation with OperationConstF32. // // This corresponds to wasm.OpcodeF32Const. func NewOperationConstF32(value float32) UnionOperation { - return UnionOperation{OpKind: OperationKindConstF32, U1: uint64(math.Float32bits(value))} + return UnionOperation{Kind: OperationKindConstF32, U1: uint64(math.Float32bits(value))} } -// NewOperationConstF64 is a constructor for UnionOperation with Kind OperationConstF64. +// NewOperationConstF64 is a constructor for UnionOperation with OperationConstF64. // // This corresponds to wasm.OpcodeF64Const. func NewOperationConstF64(value float64) UnionOperation { - return UnionOperation{OpKind: OperationKindConstF64, U1: math.Float64bits(value)} + return UnionOperation{Kind: OperationKindConstF64, U1: math.Float64bits(value)} } -// NewOperationEq is a constructor for UnionOperation with Kind OperationKindEq. +// NewOperationEq is a constructor for UnionOperation with OperationKindEq. 
// // This corresponds to wasm.OpcodeI32EqName wasm.OpcodeI64EqName wasm.OpcodeF32EqName wasm.OpcodeF64EqName func NewOperationEq(b UnsignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindEq, B1: byte(b)} + return UnionOperation{Kind: OperationKindEq, B1: byte(b)} } -// NewOperationNe is a constructor for UnionOperation with Kind OperationKindNe. +// NewOperationNe is a constructor for UnionOperation with OperationKindNe. // // This corresponds to wasm.OpcodeI32NeName wasm.OpcodeI64NeName wasm.OpcodeF32NeName wasm.OpcodeF64NeName func NewOperationNe(b UnsignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindNe, B1: byte(b)} + return UnionOperation{Kind: OperationKindNe, B1: byte(b)} } -// NewOperationEqz is a constructor for UnionOperation with Kind OperationKindEqz. +// NewOperationEqz is a constructor for UnionOperation with OperationKindEqz. // // This corresponds to wasm.OpcodeI32EqzName wasm.OpcodeI64EqzName func NewOperationEqz(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindEqz, B1: byte(b)} + return UnionOperation{Kind: OperationKindEqz, B1: byte(b)} } -// NewOperationLt is a constructor for UnionOperation with Kind OperationKindLt. +// NewOperationLt is a constructor for UnionOperation with OperationKindLt. // // This corresponds to wasm.OpcodeI32LtS wasm.OpcodeI32LtU wasm.OpcodeI64LtS wasm.OpcodeI64LtU wasm.OpcodeF32Lt wasm.OpcodeF64Lt func NewOperationLt(b SignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindLt, B1: byte(b)} + return UnionOperation{Kind: OperationKindLt, B1: byte(b)} } -// NewOperationGt is a constructor for UnionOperation with Kind OperationKindGt. +// NewOperationGt is a constructor for UnionOperation with OperationKindGt. 
// // This corresponds to wasm.OpcodeI32GtS wasm.OpcodeI32GtU wasm.OpcodeI64GtS wasm.OpcodeI64GtU wasm.OpcodeF32Gt wasm.OpcodeF64Gt func NewOperationGt(b SignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindGt, B1: byte(b)} + return UnionOperation{Kind: OperationKindGt, B1: byte(b)} } -// NewOperationLe is a constructor for UnionOperation with Kind OperationKindLe. +// NewOperationLe is a constructor for UnionOperation with OperationKindLe. // // This corresponds to wasm.OpcodeI32LeS wasm.OpcodeI32LeU wasm.OpcodeI64LeS wasm.OpcodeI64LeU wasm.OpcodeF32Le wasm.OpcodeF64Le func NewOperationLe(b SignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindLe, B1: byte(b)} + return UnionOperation{Kind: OperationKindLe, B1: byte(b)} } -// NewOperationGe is a constructor for UnionOperation with Kind OperationKindGe. +// NewOperationGe is a constructor for UnionOperation with OperationKindGe. // // This corresponds to wasm.OpcodeI32GeS wasm.OpcodeI32GeU wasm.OpcodeI64GeS wasm.OpcodeI64GeU wasm.OpcodeF32Ge wasm.OpcodeF64Ge // NewOperationGe is the constructor for OperationGe func NewOperationGe(b SignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindGe, B1: byte(b)} + return UnionOperation{Kind: OperationKindGe, B1: byte(b)} } -// NewOperationAdd is a constructor for UnionOperation with Kind OperationKindAdd. +// NewOperationAdd is a constructor for UnionOperation with OperationKindAdd. // // This corresponds to wasm.OpcodeI32AddName wasm.OpcodeI64AddName wasm.OpcodeF32AddName wasm.OpcodeF64AddName. func NewOperationAdd(b UnsignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindAdd, B1: byte(b)} + return UnionOperation{Kind: OperationKindAdd, B1: byte(b)} } -// NewOperationSub is a constructor for UnionOperation with Kind OperationKindSub. +// NewOperationSub is a constructor for UnionOperation with OperationKindSub. 
// // This corresponds to wasm.OpcodeI32SubName wasm.OpcodeI64SubName wasm.OpcodeF32SubName wasm.OpcodeF64SubName. func NewOperationSub(b UnsignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindSub, B1: byte(b)} + return UnionOperation{Kind: OperationKindSub, B1: byte(b)} } -// NewOperationMul is a constructor for UnionOperation with Kind wperationKindMul. +// NewOperationMul is a constructor for UnionOperation with OperationKindMul. // // This corresponds to wasm.OpcodeI32MulName wasm.OpcodeI64MulName wasm.OpcodeF32MulName wasm.OpcodeF64MulName. // NewOperationMul is the constructor for OperationMul func NewOperationMul(b UnsignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindMul, B1: byte(b)} + return UnionOperation{Kind: OperationKindMul, B1: byte(b)} } -// NewOperationClz is a constructor for UnionOperation with Kind OperationKindClz. +// NewOperationClz is a constructor for UnionOperation with OperationKindClz. // // This corresponds to wasm.OpcodeI32ClzName wasm.OpcodeI64ClzName. // @@ -1436,10 +1431,10 @@ func NewOperationMul(b UnsignedType) UnionOperation { // For example, stack of [..., 0x00_ff_ff_ff] results in [..., 8]. // See wasm.OpcodeI32Clz wasm.OpcodeI64Clz func NewOperationClz(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindClz, B1: byte(b)} + return UnionOperation{Kind: OperationKindClz, B1: byte(b)} } -// NewOperationCtz is a constructor for UnionOperation with Kind OperationKindCtz. +// NewOperationCtz is a constructor for UnionOperation with OperationKindCtz. // // This corresponds to wasm.OpcodeI32CtzName wasm.OpcodeI64CtzName. // @@ -1447,10 +1442,10 @@ func NewOperationClz(b UnsignedInt) UnionOperation { // current top of the stack, and push the count result. // For example, stack of [..., 0xff_ff_ff_00] results in [..., 8]. 
func NewOperationCtz(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindCtz, B1: byte(b)} + return UnionOperation{Kind: OperationKindCtz, B1: byte(b)} } -// NewOperationPopcnt is a constructor for UnionOperation with Kind OperationKindPopcnt. +// NewOperationPopcnt is a constructor for UnionOperation with OperationKindPopcnt. // // This corresponds to wasm.OpcodeI32PopcntName wasm.OpcodeI64PopcntName. // @@ -1458,19 +1453,19 @@ func NewOperationCtz(b UnsignedInt) UnionOperation { // current top of the stack, and push the count result. // For example, stack of [..., 0b00_00_00_11] results in [..., 2]. func NewOperationPopcnt(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindPopcnt, B1: byte(b)} + return UnionOperation{Kind: OperationKindPopcnt, B1: byte(b)} } -// NewOperationDiv is a constructor for UnionOperation with Kind OperationKindDiv. +// NewOperationDiv is a constructor for UnionOperation with OperationKindDiv. // // This corresponds to wasm.OpcodeI32DivS wasm.OpcodeI32DivU wasm.OpcodeI64DivS // // wasm.OpcodeI64DivU wasm.OpcodeF32Div wasm.OpcodeF64Div. func NewOperationDiv(b SignedType) UnionOperation { - return UnionOperation{OpKind: OperationKindDiv, B1: byte(b)} + return UnionOperation{Kind: OperationKindDiv, B1: byte(b)} } -// NewOperationRem is a constructor for UnionOperation with Kind OperationKindRem. +// NewOperationRem is a constructor for UnionOperation with OperationKindRem. // // This corresponds to wasm.OpcodeI32RemS wasm.OpcodeI32RemU wasm.OpcodeI64RemS wasm.OpcodeI64RemU. // @@ -1480,115 +1475,115 @@ func NewOperationDiv(b SignedType) UnionOperation { // the quotient is discarded. 
// NewOperationRem is the constructor for OperationRem func NewOperationRem(b SignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindRem, B1: byte(b)} + return UnionOperation{Kind: OperationKindRem, B1: byte(b)} } -// NewOperationAnd is a constructor for UnionOperation with Kind OperationKindAnd. +// NewOperationAnd is a constructor for UnionOperation with OperationKindAnd. // // # This corresponds to wasm.OpcodeI32AndName wasm.OpcodeI64AndName // // The engines are expected to perform "And" operation on // top two values on the stack, and pushes the result. func NewOperationAnd(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindAnd, B1: byte(b)} + return UnionOperation{Kind: OperationKindAnd, B1: byte(b)} } -// NewOperationOr is a constructor for UnionOperation with Kind OperationKindOr. +// NewOperationOr is a constructor for UnionOperation with OperationKindOr. // // # This corresponds to wasm.OpcodeI32OrName wasm.OpcodeI64OrName // // The engines are expected to perform "Or" operation on // top two values on the stack, and pushes the result. func NewOperationOr(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindOr, B1: byte(b)} + return UnionOperation{Kind: OperationKindOr, B1: byte(b)} } -// NewOperationXor is a constructor for UnionOperation with Kind OperationKindXor. +// NewOperationXor is a constructor for UnionOperation with OperationKindXor. // // # This corresponds to wasm.OpcodeI32XorName wasm.OpcodeI64XorName // // The engines are expected to perform "Xor" operation on // top two values on the stack, and pushes the result. func NewOperationXor(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindXor, B1: byte(b)} + return UnionOperation{Kind: OperationKindXor, B1: byte(b)} } -// NewOperationShl is a constructor for UnionOperation with Kind OperationKindShl. +// NewOperationShl is a constructor for UnionOperation with OperationKindShl. 
// // # This corresponds to wasm.OpcodeI32ShlName wasm.OpcodeI64ShlName // // The engines are expected to perform "Shl" operation on // top two values on the stack, and pushes the result. func NewOperationShl(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindShl, B1: byte(b)} + return UnionOperation{Kind: OperationKindShl, B1: byte(b)} } -// NewOperationShr is a constructor for UnionOperation with Kind OperationKindShr. +// NewOperationShr is a constructor for UnionOperation with OperationKindShr. // // # This corresponds to wasm.OpcodeI32ShrSName wasm.OpcodeI32ShrUName wasm.OpcodeI64ShrSName wasm.OpcodeI64ShrUName // // If OperationShr.Type is signed integer, then, the engines are expected to perform arithmetic right shift on the two // top values on the stack, otherwise do the logical right shift. func NewOperationShr(b SignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindShr, B1: byte(b)} + return UnionOperation{Kind: OperationKindShr, B1: byte(b)} } -// NewOperationRotl is a constructor for UnionOperation with Kind OperationKindRotl. +// NewOperationRotl is a constructor for UnionOperation with OperationKindRotl. // // # This corresponds to wasm.OpcodeI32RotlName wasm.OpcodeI64RotlName // // The engines are expected to perform "Rotl" operation on // top two values on the stack, and pushes the result. func NewOperationRotl(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindRotl, B1: byte(b)} + return UnionOperation{Kind: OperationKindRotl, B1: byte(b)} } -// NewOperationRotr is a constructor for UnionOperation with Kind OperationKindRotr. +// NewOperationRotr is a constructor for UnionOperation with OperationKindRotr. // // # This corresponds to wasm.OpcodeI32RotrName wasm.OpcodeI64RotrName // // The engines are expected to perform "Rotr" operation on // top two values on the stack, and pushes the result. 
func NewOperationRotr(b UnsignedInt) UnionOperation { - return UnionOperation{OpKind: OperationKindRotr, B1: byte(b)} + return UnionOperation{Kind: OperationKindRotr, B1: byte(b)} } -// NewOperationAbs is a constructor for UnionOperation with Kind OperationKindAbs. +// NewOperationAbs is a constructor for UnionOperation with OperationKindAbs. // // This corresponds to wasm.OpcodeF32Abs wasm.OpcodeF64Abs func NewOperationAbs(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindAbs, B1: byte(b)} + return UnionOperation{Kind: OperationKindAbs, B1: byte(b)} } -// NewOperationNeg is a constructor for UnionOperation with Kind OperationKindNeg. +// NewOperationNeg is a constructor for UnionOperation with OperationKindNeg. // // This corresponds to wasm.OpcodeF32Neg wasm.OpcodeF64Neg func NewOperationNeg(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindNeg, B1: byte(b)} + return UnionOperation{Kind: OperationKindNeg, B1: byte(b)} } -// NewOperationCeil is a constructor for UnionOperation with Kind OperationKindCeil. +// NewOperationCeil is a constructor for UnionOperation with OperationKindCeil. // // This corresponds to wasm.OpcodeF32CeilName wasm.OpcodeF64CeilName func NewOperationCeil(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindCeil, B1: byte(b)} + return UnionOperation{Kind: OperationKindCeil, B1: byte(b)} } -// NewOperationFloor is a constructor for UnionOperation with Kind OperationKindFloor. +// NewOperationFloor is a constructor for UnionOperation with OperationKindFloor. // // This corresponds to wasm.OpcodeF32FloorName wasm.OpcodeF64FloorName func NewOperationFloor(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindFloor, B1: byte(b)} + return UnionOperation{Kind: OperationKindFloor, B1: byte(b)} } -// NewOperationTrunc is a constructor for UnionOperation with Kind OperationKindTrunc. +// NewOperationTrunc is a constructor for UnionOperation with OperationKindTrunc. 
// // This corresponds to wasm.OpcodeF32TruncName wasm.OpcodeF64TruncName func NewOperationTrunc(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindTrunc, B1: byte(b)} + return UnionOperation{Kind: OperationKindTrunc, B1: byte(b)} } -// NewOperationNearest is a constructor for UnionOperation with Kind OperationKindNearest. +// NewOperationNearest is a constructor for UnionOperation with OperationKindNearest. // // # This corresponds to wasm.OpcodeF32NearestName wasm.OpcodeF64NearestName // @@ -1596,17 +1591,17 @@ func NewOperationTrunc(b Float) UnionOperation { // the semantics of LLVM's rint intrinsic. See https://llvm.org/docs/LangRef.html#llvm-rint-intrinsic. // For example, math.Round(-4.5) produces -5 while we want to produce -4. func NewOperationNearest(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindNearest, B1: byte(b)} + return UnionOperation{Kind: OperationKindNearest, B1: byte(b)} } -// NewOperationSqrt is a constructor for UnionOperation with Kind OperationKindSqrt. +// NewOperationSqrt is a constructor for UnionOperation with OperationKindSqrt. // // This corresponds to wasm.OpcodeF32SqrtName wasm.OpcodeF64SqrtName func NewOperationSqrt(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindSqrt, B1: byte(b)} + return UnionOperation{Kind: OperationKindSqrt, B1: byte(b)} } -// NewOperationMin is a constructor for UnionOperation with Kind OperationKindMin. +// NewOperationMin is a constructor for UnionOperation with OperationKindMin. // // # This corresponds to wasm.OpcodeF32MinName wasm.OpcodeF64MinName // @@ -1616,10 +1611,10 @@ func NewOperationSqrt(b Float) UnionOperation { // Note: WebAssembly specifies that min/max must always return NaN if one of values is NaN, // which is a different behavior different from math.Min. 
func NewOperationMin(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindMin, B1: byte(b)} + return UnionOperation{Kind: OperationKindMin, B1: byte(b)} } -// NewOperationMax is a constructor for UnionOperation with Kind OperationKindMax. +// NewOperationMax is a constructor for UnionOperation with OperationKindMax. // // # This corresponds to wasm.OpcodeF32MaxName wasm.OpcodeF64MaxName // @@ -1629,10 +1624,10 @@ func NewOperationMin(b Float) UnionOperation { // Note: WebAssembly specifies that min/max must always return NaN if one of values is NaN, // which is a different behavior different from math.Max. func NewOperationMax(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindMax, B1: byte(b)} + return UnionOperation{Kind: OperationKindMax, B1: byte(b)} } -// NewOperationCopysign is a constructor for UnionOperation with Kind OperationKindCopysign. +// NewOperationCopysign is a constructor for UnionOperation with OperationKindCopysign. // // # This corresponds to wasm.OpcodeF32CopysignName wasm.OpcodeF64CopysignName // @@ -1640,20 +1635,20 @@ func NewOperationMax(b Float) UnionOperation { // the first-popped value to the last one. // For example, stack [..., 1.213, -5.0] results in [..., -1.213]. func NewOperationCopysign(b Float) UnionOperation { - return UnionOperation{OpKind: OperationKindCopysign, B1: byte(b)} + return UnionOperation{Kind: OperationKindCopysign, B1: byte(b)} } -// NewOperationI32WrapFromI64 is a constructor for UnionOperation with Kind OperationKindI32WrapFromI64. +// NewOperationI32WrapFromI64 is a constructor for UnionOperation with OperationKindI32WrapFromI64. // // This corresponds to wasm.OpcodeI32WrapI64 and equivalent to uint64(uint32(v)) in Go. // // The engines are expected to replace the 64-bit int on top of the stack // with the corresponding 32-bit integer. 
func NewOperationI32WrapFromI64() UnionOperation { - return UnionOperation{OpKind: OperationKindI32WrapFromI64} + return UnionOperation{Kind: OperationKindI32WrapFromI64} } -// OperationITruncFromF implements Operation. +// NewOperationITruncFromF is a constructor for UnionOperation with OperationKindITruncFromF. // // This corresponds to // @@ -1663,32 +1658,26 @@ func NewOperationI32WrapFromI64() UnionOperation { // wasm.OpcodeI32TruncSatF64SName wasm.OpcodeI32TruncSatF64UName wasm.OpcodeI64TruncSatF32SName // wasm.OpcodeI64TruncSatF32UName wasm.OpcodeI64TruncSatF64SName wasm.OpcodeI64TruncSatF64UName // -// See [1] and [2] for when we encounter undefined behavior in the WebAssembly specification if OperationITruncFromF.NonTrapping == false. +// See [1] and [2] for when we encounter undefined behavior in the WebAssembly specification if NewOperationITruncFromF.NonTrapping == false. // To summarize, if the source float value is NaN or doesn't fit in the destination range of integers (incl. +=Inf), // then the runtime behavior is undefined. In wazero, the engines are expected to exit the execution in these undefined cases with // wasmruntime.ErrRuntimeInvalidConversionToInteger error. // // [1] https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#-hrefop-trunc-umathrmtruncmathsfu_m-n-z for unsigned integers. // [2] https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#-hrefop-trunc-smathrmtruncmathsfs_m-n-z for signed integers. 
-type OperationITruncFromF struct { - InputType Float - OutputType SignedInt - // NonTrapping true if this conversion is "nontrapping" in the sense of the - // https://github.com/WebAssembly/spec/blob/ce4b6c4d47eb06098cc7ab2e81f24748da822f20/proposals/nontrapping-float-to-int-conversion/Overview.md - NonTrapping bool +// +// nonTrapping true if this conversion is "nontrapping" in the sense of the +// https://github.com/WebAssembly/spec/blob/ce4b6c4d47eb06098cc7ab2e81f24748da822f20/proposals/nontrapping-float-to-int-conversion/Overview.md +func NewOperationITruncFromF(inputType Float, outputType SignedInt, nonTrapping bool) UnionOperation { + return UnionOperation{ + Kind: OperationKindITruncFromF, + B1: byte(inputType), + B2: byte(outputType), + B3: nonTrapping, + } } -// String implements fmt.Stringer. -func (o OperationITruncFromF) String() string { - return fmt.Sprintf("%s.%s.%s (non_trapping=%v)", o.OutputType, o.Kind(), o.InputType, o.NonTrapping) -} - -// Kind implements Operation.Kind. -func (OperationITruncFromF) Kind() OperationKind { - return OperationKindITruncFromF -} - -// OperationFConvertFromI implements Operation. +// NewOperationFConvertFromI is a constructor for UnionOperation with OperationKindFConvertFromI. // // This corresponds to // @@ -1696,64 +1685,57 @@ func (OperationITruncFromF) Kind() OperationKind { // wasm.OpcodeF64ConvertI32SName wasm.OpcodeF64ConvertI32UName wasm.OpcodeF64ConvertI64SName wasm.OpcodeF64ConvertI64UName // // and equivalent to float32(uint32(x)), float32(int32(x)), etc in Go. -type OperationFConvertFromI struct { - InputType SignedInt - OutputType Float +func NewOperationFConvertFromI(inputType SignedInt, outputType Float) UnionOperation { + return UnionOperation{ + Kind: OperationKindFConvertFromI, + B1: byte(inputType), + B2: byte(outputType), + } } -// String implements fmt.Stringer. 
-func (o OperationFConvertFromI) String() string { - return fmt.Sprintf("%s.%s.%s", o.OutputType, o.Kind(), o.InputType) -} - -// Kind implements Operation.Kind. -func (OperationFConvertFromI) Kind() OperationKind { - return OperationKindFConvertFromI -} - -// NewOperationF32DemoteFromF64 is a constructor for UnionOperation with Kind OperationKindF32DemoteFromF64. +// NewOperationF32DemoteFromF64 is a constructor for UnionOperation with OperationKindF32DemoteFromF64. // // This corresponds to wasm.OpcodeF32DemoteF64 and is equivalent float32(float64(v)). func NewOperationF32DemoteFromF64() UnionOperation { - return UnionOperation{OpKind: OperationKindF32DemoteFromF64} + return UnionOperation{Kind: OperationKindF32DemoteFromF64} } -// NewOperationF64PromoteFromF32 is a constructor for UnionOperation with Kind OperationKindF64PromoteFromF32. +// NewOperationF64PromoteFromF32 is a constructor for UnionOperation with OperationKindF64PromoteFromF32. // // This corresponds to wasm.OpcodeF64PromoteF32 and is equivalent float64(float32(v)). func NewOperationF64PromoteFromF32() UnionOperation { - return UnionOperation{OpKind: OperationKindF64PromoteFromF32} + return UnionOperation{Kind: OperationKindF64PromoteFromF32} } -// NewOperationI32ReinterpretFromF32 is a constructor for UnionOperation with Kind OperationKindI32ReinterpretFromF32. +// NewOperationI32ReinterpretFromF32 is a constructor for UnionOperation with OperationKindI32ReinterpretFromF32. // // This corresponds to wasm.OpcodeI32ReinterpretF32Name. func NewOperationI32ReinterpretFromF32() UnionOperation { - return UnionOperation{OpKind: OperationKindI32ReinterpretFromF32} + return UnionOperation{Kind: OperationKindI32ReinterpretFromF32} } -// NewOperationI64ReinterpretFromF64 is a constructor for UnionOperation with Kind OperationKindI64ReinterpretFromF64. +// NewOperationI64ReinterpretFromF64 is a constructor for UnionOperation with OperationKindI64ReinterpretFromF64. 
// // This corresponds to wasm.OpcodeI64ReinterpretF64Name. func NewOperationI64ReinterpretFromF64() UnionOperation { - return UnionOperation{OpKind: OperationKindI64ReinterpretFromF64} + return UnionOperation{Kind: OperationKindI64ReinterpretFromF64} } -// NewOperationF32ReinterpretFromI32 is a constructor for UnionOperation with Kind OperationKindF32ReinterpretFromI32. +// NewOperationF32ReinterpretFromI32 is a constructor for UnionOperation with OperationKindF32ReinterpretFromI32. // // This corresponds to wasm.OpcodeF32ReinterpretI32Name. func NewOperationF32ReinterpretFromI32() UnionOperation { - return UnionOperation{OpKind: OperationKindF32ReinterpretFromI32} + return UnionOperation{Kind: OperationKindF32ReinterpretFromI32} } -// NewOperationF64ReinterpretFromI64 is a constructor for UnionOperation with Kind OperationKindF64ReinterpretFromI64. +// NewOperationF64ReinterpretFromI64 is a constructor for UnionOperation with OperationKindF64ReinterpretFromI64. // // This corresponds to wasm.OpcodeF64ReinterpretI64Name. func NewOperationF64ReinterpretFromI64() UnionOperation { - return UnionOperation{OpKind: OperationKindF64ReinterpretFromI64} + return UnionOperation{Kind: OperationKindF64ReinterpretFromI64} } -// OperationExtend implements Operation. +// NewOperationExtend is a constructor for UnionOperation with OperationKindExtend. // // # This corresponds to wasm.OpcodeI64ExtendI32SName wasm.OpcodeI64ExtendI32UName // @@ -1761,275 +1743,166 @@ func NewOperationF64ReinterpretFromI64() UnionOperation { // as a 64-bit integer of corresponding signedness. For unsigned case, this is just reinterpreting the // underlying bit pattern as 64-bit integer. For signed case, this is sign-extension which preserves the // original integer's sign. -type OperationExtend struct{ Signed bool } - -// String implements fmt.Stringer. 
-func (o OperationExtend) String() string { - var in, out string - if o.Signed { - in = "i32" - out = "i64" - } else { - in = "u32" - out = "u64" +func NewOperationExtend(signed bool) UnionOperation { + op := UnionOperation{Kind: OperationKindExtend} + if signed { + op.B1 = 1 } - return fmt.Sprintf("%s.%s.%s", out, o.Kind(), in) + return op } -// Kind implements Operation.Kind. -func (OperationExtend) Kind() OperationKind { - return OperationKindExtend -} - -// NewOperationSignExtend32From8 is a constructor for UnionOperation with Kind OperationKindSignExtend32From8. +// NewOperationSignExtend32From8 is a constructor for UnionOperation with OperationKindSignExtend32From8. // // This corresponds to wasm.OpcodeI32Extend8SName. // // The engines are expected to sign-extend the first 8-bits of 32-bit in as signed 32-bit int. func NewOperationSignExtend32From8() UnionOperation { - return UnionOperation{OpKind: OperationKindSignExtend32From8} + return UnionOperation{Kind: OperationKindSignExtend32From8} } -// NewOperationSignExtend32From16 is a constructor for UnionOperation with Kind OperationKindSignExtend32From16. +// NewOperationSignExtend32From16 is a constructor for UnionOperation with OperationKindSignExtend32From16. // // This corresponds to wasm.OpcodeI32Extend16SName. // // The engines are expected to sign-extend the first 16-bits of 32-bit in as signed 32-bit int. func NewOperationSignExtend32From16() UnionOperation { - return UnionOperation{OpKind: OperationKindSignExtend32From16} + return UnionOperation{Kind: OperationKindSignExtend32From16} } -// NewOperationSignExtend64From8 is a constructor for UnionOperation with Kind OperationKindSignExtend64From8. +// NewOperationSignExtend64From8 is a constructor for UnionOperation with OperationKindSignExtend64From8. // // This corresponds to wasm.OpcodeI64Extend8SName. // // The engines are expected to sign-extend the first 8-bits of 64-bit in as signed 32-bit int. 
func NewOperationSignExtend64From8() UnionOperation { - return UnionOperation{OpKind: OperationKindSignExtend64From8} + return UnionOperation{Kind: OperationKindSignExtend64From8} } -// NewOperationSignExtend64From16 is a constructor for UnionOperation with Kind OperationKindSignExtend64From16. +// NewOperationSignExtend64From16 is a constructor for UnionOperation with OperationKindSignExtend64From16. // // This corresponds to wasm.OpcodeI64Extend16SName. // // The engines are expected to sign-extend the first 16-bits of 64-bit in as signed 32-bit int. func NewOperationSignExtend64From16() UnionOperation { - return UnionOperation{OpKind: OperationKindSignExtend64From16} + return UnionOperation{Kind: OperationKindSignExtend64From16} } -// NewOperationSignExtend64From32 is a constructor for UnionOperation with Kind OperationKindSignExtend64From32. +// NewOperationSignExtend64From32 is a constructor for UnionOperation with OperationKindSignExtend64From32. // // This corresponds to wasm.OpcodeI64Extend32SName. // // The engines are expected to sign-extend the first 32-bits of 64-bit in as signed 32-bit int. func NewOperationSignExtend64From32() UnionOperation { - return UnionOperation{OpKind: OperationKindSignExtend64From32} + return UnionOperation{Kind: OperationKindSignExtend64From32} } -// OperationMemoryInit implements Operation. +// NewOperationMemoryInit is a constructor for UnionOperation with OperationKindMemoryInit. // // This corresponds to wasm.OpcodeMemoryInitName. -type OperationMemoryInit struct { - // DataIndex is the index of the data instance in ModuleInstance.DataInstances - // by which this operation instantiates a part of the memory. - DataIndex uint32 +// +// dataIndex is the index of the data instance in ModuleInstance.DataInstances +// by which this operation instantiates a part of the memory. 
+func NewOperationMemoryInit(dataIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindMemoryInit, U1: uint64(dataIndex)} } -// String implements fmt.Stringer. -func (o OperationMemoryInit) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationMemoryInit) Kind() OperationKind { - return OperationKindMemoryInit -} - -// OperationDataDrop implements Operation. +// NewOperationDataDrop implements Operation. // // This corresponds to wasm.OpcodeDataDropName. -type OperationDataDrop struct { - // DataIndex is the index of the data instance in ModuleInstance.DataInstances - // which this operation drops. - DataIndex uint32 +// +// dataIndex is the index of the data instance in ModuleInstance.DataInstances +// which this operation drops. +func NewOperationDataDrop(dataIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindDataDrop, U1: uint64(dataIndex)} } -// String implements fmt.Stringer. -func (o OperationDataDrop) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationDataDrop) Kind() OperationKind { - return OperationKindDataDrop -} - -// NewOperationMemoryCopy is a consuctor for UnionOperation with Kind OperationKindMemoryCopy. +// NewOperationMemoryCopy is a consuctor for UnionOperation with OperationKindMemoryCopy. // // This corresponds to wasm.OpcodeMemoryCopyName. func NewOperationMemoryCopy() UnionOperation { - return UnionOperation{OpKind: OperationKindMemoryCopy} + return UnionOperation{Kind: OperationKindMemoryCopy} } -// NewOperationMemoryFill is a consuctor for UnionOperation with Kind OperationKindMemoryFill. +// NewOperationMemoryFill is a consuctor for UnionOperation with OperationKindMemoryFill. func NewOperationMemoryFill() UnionOperation { - return UnionOperation{OpKind: OperationKindMemoryFill} + return UnionOperation{Kind: OperationKindMemoryFill} } -// OperationTableInit implements Operation. 
+// NewOperationTableInit is a constructor for UnionOperation with OperationKindTableInit. // // This corresponds to wasm.OpcodeTableInitName. -type OperationTableInit struct { - // ElemIndex is the index of the element by which this operation initializes a part of the table. - ElemIndex uint32 - // TableIndex is the index of the table on which this operation initialize by the target element. - TableIndex uint32 +// +// elemIndex is the index of the element by which this operation initializes a part of the table. +// tableIndex is the index of the table on which this operation initialize by the target element. +func NewOperationTableInit(elemIndex, tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableInit, U1: uint64(elemIndex), U2: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableInit) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableInit) Kind() OperationKind { - return OperationKindTableInit -} - -// OperationElemDrop implements Operation. +// NewOperationElemDrop is a constructor for UnionOperation with OperationKindElemDrop. // // This corresponds to wasm.OpcodeElemDropName. -type OperationElemDrop struct { - // ElemIndex is the index of the element which this operation drops. - ElemIndex uint32 +// +// elemIndex is the index of the element which this operation drops. +func NewOperationElemDrop(elemIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindElemDrop, U1: uint64(elemIndex)} } -// String implements fmt.Stringer. -func (o OperationElemDrop) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationElemDrop) Kind() OperationKind { - return OperationKindElemDrop -} - -// OperationTableCopy implements Operation. +// NewOperationTableCopy implements Operation. // // This corresponds to wasm.OpcodeTableCopyName. 
-type OperationTableCopy struct { - SrcTableIndex, DstTableIndex uint32 +func NewOperationTableCopy(srcTableIndex, dstTableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableCopy, U1: uint64(srcTableIndex), U2: uint64(dstTableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableCopy) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableCopy) Kind() OperationKind { - return OperationKindTableCopy -} - -// OperationRefFunc implements Operation. +// NewOperationRefFunc constructor for UnionOperation with OperationKindRefFunc. // // This corresponds to wasm.OpcodeRefFuncName, and engines are expected to // push the opaque pointer value of engine specific func for the given FunctionIndex. // // Note: in wazero, we express any reference types (funcref or externref) as opaque pointers which is uint64. // Therefore, the engine implementations emit instructions to push the address of *function onto the stack. -type OperationRefFunc struct { - FunctionIndex uint32 +func NewOperationRefFunc(functionIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindRefFunc, U1: uint64(functionIndex)} } -// String implements fmt.Stringer. -func (o OperationRefFunc) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationRefFunc) Kind() OperationKind { - return OperationKindRefFunc -} - -// OperationTableGet implements Operation. +// NewOperationTableGet constructor for UnionOperation with OperationKindTableGet. // // This corresponds to wasm.OpcodeTableGetName. -type OperationTableGet struct { - TableIndex uint32 +func NewOperationTableGet(tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableGet, U1: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableGet) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. 
-func (OperationTableGet) Kind() OperationKind { - return OperationKindTableGet -} - -// OperationTableSet implements Operation. +// NewOperationTableSet constructor for UnionOperation with OperationKindTableSet. // // This corresponds to wasm.OpcodeTableSetName. -type OperationTableSet struct { - TableIndex uint32 +func NewOperationTableSet(tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableSet, U1: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableSet) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableSet) Kind() OperationKind { - return OperationKindTableSet -} - -// OperationTableSize implements Operation. +// NewOperationTableSize constructor for UnionOperation with OperationKindTableSize. // // This corresponds to wasm.OpcodeTableSizeName. -type OperationTableSize struct { - TableIndex uint32 +func NewOperationTableSize(tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableSize, U1: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableSize) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableSize) Kind() OperationKind { - return OperationKindTableSize -} - -// OperationTableGrow implements Operation. +// NewOperationTableGrow constructor for UnionOperation with OperationKindTableGrow. // // This corresponds to wasm.OpcodeTableGrowName. -type OperationTableGrow struct { - TableIndex uint32 +func NewOperationTableGrow(tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableGrow, U1: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableGrow) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableGrow) Kind() OperationKind { - return OperationKindTableGrow -} - -// OperationTableFill implements Operation. 
+// NewOperationTableFill constructor for UnionOperation with OperationKindTableFill. // // This corresponds to wasm.OpcodeTableFillName. -type OperationTableFill struct { - TableIndex uint32 +func NewOperationTableFill(tableIndex uint32) UnionOperation { + return UnionOperation{Kind: OperationKindTableFill, U1: uint64(tableIndex)} } -// String implements fmt.Stringer. -func (o OperationTableFill) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationTableFill) Kind() OperationKind { - return OperationKindTableFill -} - -// OperationV128Const implements Operation. -type OperationV128Const struct { - Lo, Hi uint64 -} - -// String implements fmt.Stringer. -func (o OperationV128Const) String() string { - return fmt.Sprintf("%s [%#x, %#x]", o.Kind(), o.Lo, o.Hi) -} - -// Kind implements Operation.Kind. -// -// This corresponds to wasm.OpcodeVecV128Const. -func (OperationV128Const) Kind() OperationKind { - return OperationKindV128Const +// NewOperationV128Const constructor for UnionOperation with OperationKindV128Const +func NewOperationV128Const(lo, hi uint64) UnionOperation { + return UnionOperation{Kind: OperationKindV128Const, U1: lo, U2: hi} } // Shape corresponds to a shape of v128 values. @@ -2063,40 +1936,22 @@ func shapeName(s Shape) (ret string) { return } -// OperationV128Add implements Operation. +// NewOperationV128Add constructor for UnionOperation with OperationKindV128Add. // // This corresponds to wasm.OpcodeVecI8x16AddName wasm.OpcodeVecI16x8AddName wasm.OpcodeVecI32x4AddName // // wasm.OpcodeVecI64x2AddName wasm.OpcodeVecF32x4AddName wasm.OpcodeVecF64x2AddName -type OperationV128Add struct { - Shape Shape +func NewOperationV128Add(shape Shape) UnionOperation { + return UnionOperation{Kind: OperationKindV128Add, B1: shape} } -// String implements fmt.Stringer. 
-func (o OperationV128Add) String() string { - return fmt.Sprintf("%s (shape=%s)", o.Kind(), shapeName(o.Shape)) -} - -// Kind implements Operation.Kind. -func (OperationV128Add) Kind() OperationKind { - return OperationKindV128Add -} - -// OperationV128Sub implements Operation. +// NewOperationV128Sub constructor for UnionOperation with OperationKindV128Sub. // // This corresponds to wasm.OpcodeVecI8x16SubName wasm.OpcodeVecI16x8SubName wasm.OpcodeVecI32x4SubName // // wasm.OpcodeVecI64x2SubName wasm.OpcodeVecF32x4SubName wasm.OpcodeVecF64x2SubName -type OperationV128Sub struct { - Shape Shape -} - -// String implements fmt.Stringer. -func (o OperationV128Sub) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationV128Sub) Kind() OperationKind { - return OperationKindV128Sub +func NewOperationV128Sub(shape Shape) UnionOperation { + return UnionOperation{Kind: OperationKindV128Sub, B1: shape} } // V128LoadType represents a type of wasm.OpcodeVecV128Load* instructions. @@ -2131,7 +1986,7 @@ const ( V128LoadType64zero ) -// OperationV128Load implements Operation. +// NewOperationV128Load is a constructor for UnionOperation with OperationKindV128Load. // // This corresponds to // @@ -2140,79 +1995,54 @@ const ( // wasm.OpcodeVecV128Load32x2UName wasm.OpcodeVecV128Load8SplatName wasm.OpcodeVecV128Load16SplatName // wasm.OpcodeVecV128Load32SplatName wasm.OpcodeVecV128Load64SplatName wasm.OpcodeVecV128Load32zeroName // wasm.OpcodeVecV128Load64zeroName -type OperationV128Load struct { - Type V128LoadType - Arg MemoryArg +func NewOperationV128Load(loadType V128LoadType, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindV128Load, B1: loadType, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// String implements fmt.Stringer. -func (o OperationV128Load) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. 
-func (OperationV128Load) Kind() OperationKind { - return OperationKindV128Load -} - -// OperationV128LoadLane implements Operation. +// NewOperationV128LoadLane is a constructor for UnionOperation with OperationKindV128LoadLane. // // This corresponds to wasm.OpcodeVecV128Load8LaneName wasm.OpcodeVecV128Load16LaneName // // wasm.OpcodeVecV128Load32LaneName wasm.OpcodeVecV128Load64LaneName. -type OperationV128LoadLane struct { - // LaneIndex is >=0 && <(128/LaneSize). - LaneIndex byte - // LaneSize is either 8, 16, 32, or 64. - LaneSize byte - Arg MemoryArg +// +// laneIndex is >=0 && <(128/LaneSize). +// laneSize is either 8, 16, 32, or 64. +func NewOperationV128LoadLane(laneIndex, laneSize byte, arg MemoryArg) UnionOperation { + return UnionOperation{Kind: OperationKindV128LoadLane, B1: laneSize, B2: laneIndex, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)} } -// String implements fmt.Stringer. -func (o OperationV128LoadLane) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationV128LoadLane) Kind() OperationKind { - return OperationKindV128LoadLane -} - -// OperationV128Store implements Operation. +// NewOperationV128Store is a constructor for UnionOperation with OperationKindV128Store. // // This corresponds to wasm.OpcodeVecV128Load8LaneName wasm.OpcodeVecV128Load16LaneName // // wasm.OpcodeVecV128Load32LaneName wasm.OpcodeVecV128Load64LaneName. -type OperationV128Store struct { - Arg MemoryArg +func NewOperationV128Store(arg MemoryArg) UnionOperation { + return UnionOperation{ + Kind: OperationKindV128Store, + U1: uint64(arg.Alignment), + U2: uint64(arg.Offset), + } } -// String implements fmt.Stringer. -func (o OperationV128Store) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationV128Store) Kind() OperationKind { - return OperationKindV128Store -} - -// OperationV128StoreLane implements Operation. +// NewOperationV128StoreLane implements Operation. 
// // This corresponds to wasm.OpcodeVecV128Load8LaneName wasm.OpcodeVecV128Load16LaneName // // wasm.OpcodeVecV128Load32LaneName wasm.OpcodeVecV128Load64LaneName. -type OperationV128StoreLane struct { - // LaneIndex is >=0 && <(128/LaneSize). - LaneIndex byte - // LaneSize is either 8, 16, 32, or 64. - LaneSize byte - Arg MemoryArg +// +// laneIndex is >=0 && <(128/LaneSize). +// laneSize is either 8, 16, 32, or 64. +func NewOperationV128StoreLane(laneIndex byte, laneSize byte, arg MemoryArg) UnionOperation { + return UnionOperation{ + Kind: OperationKindV128StoreLane, + B1: laneSize, + B2: laneIndex, + U1: uint64(arg.Alignment), + U2: uint64(arg.Offset), + } } -// String implements fmt.Stringer. -func (o OperationV128StoreLane) String() string { return o.Kind().String() } - -// Kind implements Operation.Kind. -func (OperationV128StoreLane) Kind() OperationKind { - return OperationKindV128StoreLane -} - -// OperationV128ExtractLane implements Operation. +// NewOperationV128ExtractLane is a constructor for UnionOperation with OperationKindV128ExtractLane. // // This corresponds to // @@ -2220,256 +2050,145 @@ func (OperationV128StoreLane) Kind() OperationKind { // wasm.OpcodeVecI16x8ExtractLaneSName wasm.OpcodeVecI16x8ExtractLaneUName // wasm.OpcodeVecI32x4ExtractLaneName wasm.OpcodeVecI64x2ExtractLaneName // wasm.OpcodeVecF32x4ExtractLaneName wasm.OpcodeVecF64x2ExtractLaneName. -type OperationV128ExtractLane struct { - // LaneIndex is >=0 && =0 && =0 && =0 &&