Decode: code cleanup for struct cache (#659)

This commit is contained in:
Thomas Pelletier
2021-11-07 18:35:30 -05:00
committed by GitHub
parent 11f789ef11
commit dc1740d473
4 changed files with 105 additions and 73 deletions
+23
View File
@@ -0,0 +1,23 @@
package danger
import (
"reflect"
"unsafe"
)
// TypeID is used as the key in encoder and decoder caches to enable using
// the optimized runtime.mapaccess2_fast64 function instead of the more
// expensive lookup if we were to use reflect.Type as map key.
//
// TypeID holds the pointer to the reflect.Type value, which is unique
// in the program.
//
// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
type TypeID unsafe.Pointer
// MakeTypeID returns the TypeID of t.
//
// It relies on the two-word memory layout of an interface value: the first
// word points to the interface's type descriptor, the second to the data.
// For a reflect.Type the data word points to the runtime type descriptor,
// which is unique per type within a program, so it is usable as a map key.
func MakeTypeID(t reflect.Type) TypeID {
	// reflect.Type has the fields:
	//   typ unsafe.Pointer  (interface type word)
	//   ptr unsafe.Pointer  (data word: the unique *rtype)
	return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
}
+1
View File
@@ -11,3 +11,4 @@ var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
var sliceInterfaceType = reflect.TypeOf([]interface{}{}) var sliceInterfaceType = reflect.TypeOf([]interface{}{})
var stringType = reflect.TypeOf("")
+68 -73
View File
@@ -9,10 +9,11 @@ import (
"math" "math"
"reflect" "reflect"
"strings" "strings"
"sync" "sync/atomic"
"time" "time"
"github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/ast"
"github.com/pelletier/go-toml/v2/internal/danger"
"github.com/pelletier/go-toml/v2/internal/tracker" "github.com/pelletier/go-toml/v2/internal/tracker"
) )
@@ -384,12 +385,14 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
elem = v.Elem() elem = v.Elem()
return d.handleKeyPart(key, elem, nextFn, makeFn) return d.handleKeyPart(key, elem, nextFn, makeFn)
case reflect.Map: case reflect.Map:
// Create the key for the map element. For now assume it's a string. // Create the key for the map element. For now assume it's a string.
mk := reflect.ValueOf(string(key.Node().Data)) mk := reflect.ValueOf(string(key.Node().Data))
// If the map does not exist, create it. // If the map does not exist, create it.
if v.IsNil() { if v.IsNil() {
v = reflect.MakeMap(v.Type()) vt := v.Type()
v = reflect.MakeMap(vt)
rv = v rv = v
} }
@@ -401,7 +404,8 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
// map[string]interface{} or a []interface{} depending on whether // map[string]interface{} or a []interface{} depending on whether
// this is the last part of the array table key. // this is the last part of the array table key.
t := v.Type().Elem() vt := v.Type()
t := vt.Elem()
if t.Kind() == reflect.Interface { if t.Kind() == reflect.Interface {
mv = makeFn() mv = makeFn()
} else { } else {
@@ -415,7 +419,8 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
} }
set = true set = true
} else if !mv.CanAddr() { } else if !mv.CanAddr() {
t := v.Type().Elem() vt := v.Type()
t := vt.Elem()
oldmv := mv oldmv := mv
mv = reflect.New(t).Elem() mv = reflect.New(t).Elem()
mv.Set(oldmv) mv.Set(oldmv)
@@ -453,7 +458,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
if v.Elem().IsValid() { if v.Elem().IsValid() {
v = v.Elem() v = v.Elem()
} else { } else {
v = reflect.MakeMap(mapStringInterfaceType) v = makeMapStringInterface()
} }
x, err := d.handleKeyPart(key, v, nextFn, makeFn) x, err := d.handleKeyPart(key, v, nextFn, makeFn)
@@ -697,7 +702,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error
case reflect.Interface: case reflect.Interface:
elem := v.Elem() elem := v.Elem()
if !elem.IsValid() { if !elem.IsValid() {
elem = reflect.MakeMap(mapStringInterfaceType) elem = makeMapStringInterface()
v.Set(elem) v.Set(elem)
} }
return d.unmarshalInlineTable(itable, elem) return d.unmarshalInlineTable(itable, elem)
@@ -953,12 +958,15 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec
// There is no guarantee over what it could be. // There is no guarantee over what it could be.
switch v.Kind() { switch v.Kind() {
case reflect.Map: case reflect.Map:
mk := reflect.ValueOf(string(key.Node().Data)) vt := v.Type()
keyType := v.Type().Key() mk := reflect.ValueOf(string(key.Node().Data))
if !mk.Type().AssignableTo(keyType) { mkt := stringType
if !mk.Type().ConvertibleTo(keyType) {
return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", mk.Type(), keyType) keyType := vt.Key()
if !mkt.AssignableTo(keyType) {
if !mkt.ConvertibleTo(keyType) {
return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", mkt, keyType)
} }
mk = mk.Convert(keyType) mk = mk.Convert(keyType)
@@ -966,7 +974,7 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec
// If the map does not exist, create it. // If the map does not exist, create it.
if v.IsNil() { if v.IsNil() {
v = reflect.MakeMap(v.Type()) v = reflect.MakeMap(vt)
rv = v rv = v
} }
@@ -1013,11 +1021,12 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec
case reflect.Interface: case reflect.Interface:
v = v.Elem() v = v.Elem()
// Following encoding/toml: decoding an object into an interface{}, it // Following encoding/json: decoding an object into an
// needs to always hold a map[string]interface{}. This is for the types // interface{}, it needs to always hold a
// to be consistent whether a previous value was set or not. // map[string]interface{}. This is for the types to be
// consistent whether a previous value was set or not.
if !v.IsValid() || v.Type() != mapStringInterfaceType { if !v.IsValid() || v.Type() != mapStringInterfaceType {
v = reflect.MakeMap(mapStringInterfaceType) v = makeMapStringInterface()
} }
x, err := d.handleKeyValuePart(key, value, v) x, err := d.handleKeyValuePart(key, value, v)
@@ -1064,70 +1073,29 @@ func initAndDereferencePointer(v reflect.Value) reflect.Value {
type fieldPathsMap = map[string][]int type fieldPathsMap = map[string][]int
type fieldPathsCache struct { var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
m map[reflect.Type]fieldPathsMap
l sync.RWMutex
}
func (c *fieldPathsCache) get(t reflect.Type) (fieldPathsMap, bool) {
c.l.RLock()
paths, ok := c.m[t]
c.l.RUnlock()
return paths, ok
}
func (c *fieldPathsCache) set(t reflect.Type, m fieldPathsMap) {
c.l.Lock()
c.m[t] = m
c.l.Unlock()
}
var globalFieldPathsCache = fieldPathsCache{
m: map[reflect.Type]fieldPathsMap{},
l: sync.RWMutex{},
}
func structField(v reflect.Value, name string) (reflect.Value, bool) { func structField(v reflect.Value, name string) (reflect.Value, bool) {
//nolint:godox t := v.Type()
// TODO: cache this, and reduce allocations
fieldPaths, ok := globalFieldPathsCache.get(v.Type()) cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
fieldPaths, ok := cache[danger.MakeTypeID(t)]
if !ok { if !ok {
fieldPaths = map[string][]int{} fieldPaths = map[string][]int{}
path := make([]int, 0, 16) forEachField(t, nil, func(name string, path []int) {
fieldPaths[name] = path
// extra copy for the case-insensitive match
fieldPaths[strings.ToLower(name)] = path
})
var walk func(reflect.Value) newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
walk = func(v reflect.Value) { newCache[danger.MakeTypeID(t)] = fieldPaths
t := v.Type() for k, v := range cache {
for i := 0; i < t.NumField(); i++ { newCache[k] = v
l := len(path)
path = append(path, i)
f := t.Field(i)
if f.Anonymous {
walk(v.Field(i))
} else if f.PkgPath == "" {
// only consider exported fields
fieldName, ok := f.Tag.Lookup("toml")
if !ok {
fieldName = f.Name
}
pathCopy := make([]int, len(path))
copy(pathCopy, path)
fieldPaths[fieldName] = pathCopy
// extra copy for the case-insensitive match
fieldPaths[strings.ToLower(fieldName)] = pathCopy
}
path = path[:l]
}
} }
globalFieldPathsCache.Store(newCache)
walk(v)
globalFieldPathsCache.set(v.Type(), fieldPaths)
} }
path, ok := fieldPaths[name] path, ok := fieldPaths[name]
@@ -1141,3 +1109,30 @@ func structField(v reflect.Value, name string) (reflect.Value, bool) {
return v.FieldByIndex(path), true return v.FieldByIndex(path), true
} }
func forEachField(t reflect.Type, path []int, do func(name string, path []int)) {
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if !f.Anonymous && f.PkgPath != "" {
// only consider exported fields.
continue
}
fieldPath := append(path, i)
fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]
if f.Anonymous {
forEachField(f.Type, fieldPath, do)
continue
}
name, ok := f.Tag.Lookup("toml")
if !ok {
name = f.Name
}
do(name, fieldPath)
}
}
+13
View File
@@ -1037,6 +1037,19 @@ B = "data"`,
} }
}, },
}, },
{
desc: "unexported struct fields are ignored",
input: `foo = "bar"`,
gen: func() test {
type doc struct {
foo string
}
return test{
target: &doc{},
expected: &doc{},
}
},
},
{ {
desc: "array table into nil ptr", desc: "array table into nil ptr",
input: `[[foo]] input: `[[foo]]