This adjusts towards the existing code which used int32/64 instead of uint32/64. The reason is that the spec indicates interpretation as signed numbers, which affects the maximum value. See https://www.w3.org/TR/wasm-core-1/#value-types%E2%91%A2 Signed-off-by: Adrian Cole <adrian@tetrate.io>
32 lines
781 B
Go
32 lines
781 B
Go
package ieee754
|
|
|
|
import (
|
|
"encoding/binary"
|
|
"io"
|
|
"math"
|
|
)
|
|
|
|
// DecodeFloat32 decodes a float32 in IEEE 754 binary representation.
|
|
// See https://www.w3.org/TR/wasm-core-1/#floating-point%E2%91%A2
|
|
func DecodeFloat32(r io.Reader) (float32, error) {
|
|
buf := make([]byte, 4)
|
|
_, err := io.ReadFull(r, buf)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
raw := binary.LittleEndian.Uint32(buf)
|
|
return math.Float32frombits(raw), nil
|
|
}
|
|
|
|
// DecodeFloat64 decodes a float64 in IEEE 754 binary representation.
|
|
// See https://www.w3.org/TR/wasm-core-1/#floating-point%E2%91%A2
|
|
func DecodeFloat64(r io.Reader) (float64, error) {
|
|
buf := make([]byte, 8)
|
|
_, err := io.ReadFull(r, buf)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
raw := binary.LittleEndian.Uint64(buf)
|
|
return math.Float64frombits(raw), nil
|
|
}
|