Decoder: check max uint on 32-bit platforms (#778)

Fixes #777
Author: Thomas Pelletier
Date:   2022-05-10 15:43:26 +02:00
Committed by: GitHub
parent 627dade0c7
commit 0a422e3dbd
+21 -6
@@ -866,12 +866,27 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
 	return nil
 }
 
-func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
-	const (
-		maxInt = int64(^uint(0) >> 1)
-		minInt = -maxInt - 1
-	)
-
+const (
+	maxInt = int64(^uint(0) >> 1)
+	minInt = -maxInt - 1
+)
+
+// Maximum value of uint for decoding. Currently the decoder parses the integer
+// into an int64. As a result, on architectures where uint is 64 bits, the
+// effective maximum uint we can decode is the maximum of int64. On
+// architectures where uint is 32 bits, the maximum value we can decode is
+// lower: the maximum of uint32. I didn't find a way to figure out this value at
+// compile time, so it is computed during initialization.
+var maxUint int64 = math.MaxInt64
+
+func init() {
+	m := uint64(^uint(0))
+	if m < uint64(maxUint) {
+		maxUint = int64(m)
+	}
+}
+
+func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
 	i, err := parseInteger(value.Data)
 	if err != nil {
 		return err
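The `^uint(0)` expression used above is the standard Go idiom for the platform's maximum uint: complementing a zero-valued uint sets every bit, yielding math.MaxUint32 on 32-bit targets and math.MaxUint64 on 64-bit ones. Below is a minimal, self-contained sketch (not part of the commit) showing the same init-time clamping in isolation:

package main

import (
	"fmt"
	"math"
)

// maxUint mirrors the package-level variable added in the hunk above: it
// starts at math.MaxInt64 (the most the int64-based parser can produce) and
// is clamped down at initialization when the platform's uint is narrower.
var maxUint int64 = math.MaxInt64

func init() {
	m := uint64(^uint(0)) // all bits set: the largest value a uint can hold
	if m < uint64(maxUint) {
		maxUint = int64(m) // 32-bit platform: clamp to math.MaxUint32
	}
}

func main() {
	// Prints 9223372036854775807 on 64-bit builds, 4294967295 on 32-bit.
	fmt.Println(maxUint)
}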
@@ -932,7 +947,7 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
 		r = reflect.ValueOf(uint8(i))
 	case reflect.Uint:
-		if i < 0 {
+		if i < 0 || i > maxUint {
 			return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
 		}
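Seen from the decoder's public API, the new guard means an integer above the platform's maximum uint is now rejected on 32-bit builds instead of wrapping silently. A hedged usage sketch (the config struct and input are illustrative, not taken from the commit):

package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type config struct {
	Size uint `toml:"size"`
}

func main() {
	var c config
	// 2^32 fits in an int64, so parsing succeeds, but it does not fit in a
	// 32-bit uint. With this commit, 32-bit builds report an error here.
	err := toml.Unmarshal([]byte("size = 4294967296"), &c)
	fmt.Println(c.Size, err)
}

On 64-bit builds the same input decodes successfully, since 2^32 fits in a 64-bit uint. Note that the diff leaves the error message unchanged, so the overflow case reuses the existing "negative number %d does not fit in an uint" wording.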