Fixes global numeric types to have max of signed encoding (#442)
This adjusts towards the existing code, which used int32/64 instead of uint32/64. The reason is that the spec indicates interpretation as signed numbers, which affects the maximum value. See https://www.w3.org/TR/wasm-core-1/#value-types%E2%91%A2

Signed-off-by: Adrian Cole <adrian@tetrate.io>
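A minimal illustration of the point above (plain Go, not code from this repository): the same 32 bits have a different maximum depending on whether they are read as signed or unsigned, so validating against the unsigned maximum would accept values the spec's signed interpretation cannot represent as a positive number.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Under the spec's signed reading, a 32-bit value tops out at math.MaxInt32.
	bits := uint32(math.MaxUint32)    // 0xFFFFFFFF, the unsigned maximum
	fmt.Println(int32(bits))          // -1: those bits are not a valid positive signed value
	fmt.Println(int32(math.MaxInt32)) // 2147483647: the signed maximum (0x7FFFFFFF)
}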
@@ -31,9 +31,13 @@ import (
type ValueType = byte

const (
	// ValueTypeI32 is a 32-bit integer.
	ValueTypeI32 ValueType = 0x7f
	// ValueTypeI64 is a 64-bit integer.
	ValueTypeI64 ValueType = 0x7e
	// ValueTypeF32 is a 32-bit floating point number.
	ValueTypeF32 ValueType = 0x7d
	// ValueTypeF64 is a 64-bit floating point number.
	ValueTypeF64 ValueType = 0x7c
)
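As a hedged sketch only (the maxGlobal helper below is hypothetical and not part of this library's API; the ValueType constants are re-declared so the example is self-contained), the constants above could be mapped to their signed integer maxima like this:

package main

import (
	"fmt"
	"math"
)

// ValueType mirrors the type alias and integer constants shown in the diff.
type ValueType = byte

const (
	ValueTypeI32 ValueType = 0x7f
	ValueTypeI64 ValueType = 0x7e
)

// maxGlobal is a hypothetical helper: it returns the largest integer a value
// of the given type can hold under the spec's signed interpretation
// (MaxInt32/MaxInt64, not MaxUint32/MaxUint64).
func maxGlobal(vt ValueType) (uint64, bool) {
	switch vt {
	case ValueTypeI32:
		return math.MaxInt32, true
	case ValueTypeI64:
		return math.MaxInt64, true
	default:
		return 0, false // floating-point types are not integer-bounded
	}
}

func main() {
	m, _ := maxGlobal(ValueTypeI32)
	fmt.Println(m) // 2147483647
}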