Merge pull request #49 from pelletier/generic-input

Generic input
Thomas Pelletier committed on 2016-01-31 16:57:17 +01:00
5 changed files with 200 additions and 182 deletions
+167 -159
@@ -7,10 +7,11 @@ package toml
 import (
 	"fmt"
+	"github.com/pelletier/go-buffruneio"
+	"io"
 	"regexp"
 	"strconv"
 	"strings"
-	"unicode/utf8"
 )
 
 var dateRegexp *regexp.Regexp
@@ -20,47 +21,56 @@ type tomlLexStateFn func() tomlLexStateFn
 
 // Define lexer
 type tomlLexer struct {
-	input  string
-	start  int
-	pos    int
-	width  int
-	tokens chan token
-	depth  int
-	line   int
-	col    int
+	input         *buffruneio.Reader // Textual source
+	buffer        []rune             // Runes composing the current token
+	tokens        chan token
+	depth         int
+	line          int
+	col           int
+	endbufferLine int
+	endbufferCol  int
 }
 
-func (l *tomlLexer) run() {
-	for state := l.lexVoid; state != nil; {
-		state = state()
-	}
-	close(l.tokens)
-}
+// Basic read operations on input
 
-func (l *tomlLexer) nextStart() {
-	// iterate by runes (utf8 characters)
-	// search for newlines and advance line/col counts
-	for i := l.start; i < l.pos; {
-		r, width := utf8.DecodeRuneInString(l.input[i:])
-		if r == '\n' {
-			l.line++
-			l.col = 1
-		} else {
-			l.col++
-		}
-		i += width
+func (l *tomlLexer) read() rune {
+	r, err := l.input.ReadRune()
+	if err != nil {
+		panic(err)
+	}
+	if r == '\n' {
+		l.endbufferLine++
+		l.endbufferCol = 1
+	} else {
+		l.endbufferCol++
 	}
-	// advance start position to next token
-	l.start = l.pos
+	return r
 }
 
-func (l *tomlLexer) emit(t tokenType) {
-	l.tokens <- token{
-		Position: Position{l.line, l.col},
-		typ:      t,
-		val:      l.input[l.start:l.pos],
-	}
-	l.nextStart()
+func (l *tomlLexer) next() rune {
+	r := l.read()
+
+	if r != eof {
+		l.buffer = append(l.buffer, r)
+	}
+	return r
+}
+
+func (l *tomlLexer) ignore() {
+	l.buffer = make([]rune, 0)
+	l.line = l.endbufferLine
+	l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+	l.next()
+	l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+	for i := 0; i < n; i++ {
+		l.next()
+	}
 }
 
 func (l *tomlLexer) emitWithValue(t tokenType, value string) {
@@ -69,27 +79,37 @@ func (l *tomlLexer) emitWithValue(t tokenType, value string) {
 		typ:      t,
 		val:      value,
 	}
-	l.nextStart()
+	l.ignore()
 }
 
-func (l *tomlLexer) next() rune {
-	if l.pos >= len(l.input) {
-		l.width = 0
-		return eof
-	}
-	var r rune
-	r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
-	l.pos += l.width
+func (l *tomlLexer) emit(t tokenType) {
+	l.emitWithValue(t, string(l.buffer))
+}
+
+func (l *tomlLexer) peek() rune {
+	r, err := l.input.ReadRune()
+	if err != nil {
+		panic(err)
+	}
+	l.input.UnreadRune()
 	return r
 }
 
-func (l *tomlLexer) ignore() {
-	l.nextStart()
+func (l *tomlLexer) follow(next string) bool {
+	for _, expectedRune := range next {
+		r, err := l.input.ReadRune()
+		defer l.input.UnreadRune()
+		if err != nil {
+			panic(err)
+		}
+		if expectedRune != r {
+			return false
+		}
+	}
+	return true
 }
 
-func (l *tomlLexer) backup() {
-	l.pos -= l.width
-}
+// Error management
 
 func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
 	l.tokens <- token{
@@ -100,23 +120,7 @@ func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
 	return nil
 }
 
-func (l *tomlLexer) peek() rune {
-	r := l.next()
-	l.backup()
-	return r
-}
-
-func (l *tomlLexer) accept(valid string) bool {
-	if strings.IndexRune(valid, l.next()) >= 0 {
-		return true
-	}
-	l.backup()
-	return false
-}
-
-func (l *tomlLexer) follow(next string) bool {
-	return strings.HasPrefix(l.input[l.pos:], next)
-}
+// State functions
 
 func (l *tomlLexer) lexVoid() tomlLexStateFn {
 	for {
@@ -128,10 +132,13 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
 			return l.lexComment
 		case '=':
 			return l.lexEqual
+		case '\n':
+			l.skip()
+			continue
 		}
 
 		if isSpace(next) {
-			l.ignore()
+			l.skip()
 		}
 
 		if l.depth > 0 {
@@ -142,7 +149,8 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
 			return l.lexKey
 		}
 
-		if l.next() == eof {
+		if next == eof {
+			l.next()
 			break
 		}
 	}
@@ -178,8 +186,7 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
 		case ',':
 			return l.lexComma
 		case '\n':
-			l.ignore()
-			l.pos++
+			l.skip()
 			if l.depth == 0 {
 				return l.lexVoid
 			}
@@ -196,14 +203,20 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
 			return l.lexFalse
 		}
 
-		if isAlphanumeric(next) {
-			return l.lexKey
+		if isSpace(next) {
+			l.skip()
+			continue
 		}
 
-		dateMatch := dateRegexp.FindString(l.input[l.pos:])
+		if next == eof {
+			l.next()
+			break
+		}
+
+		possibleDate := string(l.input.Peek(35))
+		dateMatch := dateRegexp.FindString(possibleDate)
 		if dateMatch != "" {
-			l.ignore()
-			l.pos += len(dateMatch)
+			l.fastForward(len(dateMatch))
 			return l.lexDate
 		}
@@ -211,13 +224,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
 			return l.lexNumber
 		}
 
-		if isSpace(next) {
-			l.ignore()
-		}
-
-		if l.next() == eof {
-			break
+		if isAlphanumeric(next) {
+			return l.lexKey
 		}
 	}
 
 	l.emit(tokenEOF)
@@ -225,15 +235,13 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
 }
 
 func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
-	l.ignore()
-	l.pos++
+	l.next()
 	l.emit(tokenLeftCurlyBrace)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
-	l.ignore()
-	l.pos++
+	l.next()
 	l.emit(tokenRightCurlyBrace)
 	return l.lexRvalue
 }
@@ -244,37 +252,32 @@ func (l *tomlLexer) lexDate() tomlLexStateFn {
 }
 
 func (l *tomlLexer) lexTrue() tomlLexStateFn {
-	l.ignore()
-	l.pos += 4
+	l.fastForward(4)
 	l.emit(tokenTrue)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexFalse() tomlLexStateFn {
-	l.ignore()
-	l.pos += 5
+	l.fastForward(5)
 	l.emit(tokenFalse)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexEqual() tomlLexStateFn {
-	l.ignore()
-	l.accept("=")
+	l.next()
 	l.emit(tokenEqual)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexComma() tomlLexStateFn {
-	l.ignore()
-	l.accept(",")
+	l.next()
 	l.emit(tokenComma)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexKey() tomlLexStateFn {
-	l.ignore()
 	inQuotes := false
-	for r := l.next(); isKeyChar(r) || r == '\n'; r = l.next() {
+	for r := l.peek(); isKeyChar(r) || r == '\n'; r = l.peek() {
 		if r == '"' {
 			inQuotes = !inQuotes
 		} else if r == '\n' {
@@ -284,46 +287,40 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
 		} else if !isValidBareChar(r) && !inQuotes {
 			return l.errorf("keys cannot contain %c character", r)
 		}
+		l.next()
 	}
-	l.backup()
 	l.emit(tokenKey)
 	return l.lexVoid
 }
 
 func (l *tomlLexer) lexComment() tomlLexStateFn {
-	for {
-		next := l.next()
-		if next == '\n' || next == eof {
-			break
-		}
+	for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+		l.next()
 	}
 	l.ignore()
 	return l.lexVoid
 }
 
 func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
-	l.ignore()
-	l.pos++
+	l.next()
 	l.emit(tokenLeftBracket)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
-	l.pos++
-	l.ignore()
+	l.skip()
 	growingString := ""
 
 	// handle special case for triple-quote
 	terminator := "'"
 	if l.follow("''") {
-		l.pos += 2
-		l.ignore()
+		l.skip()
+		l.skip()
 		terminator = "'''"
 		// special case: discard leading newline
 		if l.peek() == '\n' {
-			l.pos++
-			l.ignore()
+			l.skip()
 		}
 	}
@@ -331,50 +328,48 @@ func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
 	for {
 		if l.follow(terminator) {
 			l.emitWithValue(tokenString, growingString)
-			l.pos += len(terminator)
+			l.fastForward(len(terminator))
 			l.ignore()
 			return l.lexRvalue
 		}
 
-		growingString += string(l.peek())
-
-		if l.next() == eof {
+		next := l.peek()
+		if next == eof {
 			break
 		}
+		growingString += string(l.next())
 	}
 
 	return l.errorf("unclosed string")
 }
 
 func (l *tomlLexer) lexString() tomlLexStateFn {
-	l.pos++
-	l.ignore()
+	l.skip()
 	growingString := ""
 
 	// handle special case for triple-quote
 	terminator := "\""
 	if l.follow("\"\"") {
-		l.pos += 2
-		l.ignore()
+		l.skip()
+		l.skip()
 		terminator = "\"\"\""
 		// special case: discard leading newline
 		if l.peek() == '\n' {
-			l.pos++
-			l.ignore()
+			l.skip()
 		}
 	}
 
 	for {
 		if l.follow(terminator) {
 			l.emitWithValue(tokenString, growingString)
-			l.pos += len(terminator)
+			l.fastForward(len(terminator))
 			l.ignore()
 			return l.lexRvalue
 		}
 
 		if l.follow("\\") {
-			l.pos++
+			l.next()
 			switch l.peek() {
 			case '\r':
 				fallthrough
@@ -384,56 +379,60 @@ func (l *tomlLexer) lexString() tomlLexStateFn {
 				fallthrough
 			case ' ':
 				// skip all whitespace chars following backslash
-				l.pos++
 				for strings.ContainsRune("\r\n\t ", l.peek()) {
-					l.pos++
+					l.next()
 				}
-				l.pos--
 			case '"':
 				growingString += "\""
+				l.next()
 			case 'n':
 				growingString += "\n"
+				l.next()
 			case 'b':
 				growingString += "\b"
+				l.next()
 			case 'f':
 				growingString += "\f"
+				l.next()
 			case '/':
 				growingString += "/"
+				l.next()
 			case 't':
 				growingString += "\t"
+				l.next()
 			case 'r':
 				growingString += "\r"
+				l.next()
 			case '\\':
 				growingString += "\\"
+				l.next()
 			case 'u':
-				l.pos++
+				l.next()
 				code := ""
 				for i := 0; i < 4; i++ {
 					c := l.peek()
-					l.pos++
 					if !isHexDigit(c) {
 						return l.errorf("unfinished unicode escape")
 					}
+					l.next()
 					code = code + string(c)
 				}
-				l.pos--
 				intcode, err := strconv.ParseInt(code, 16, 32)
 				if err != nil {
 					return l.errorf("invalid unicode escape: \\u" + code)
 				}
 				growingString += string(rune(intcode))
 			case 'U':
-				l.pos++
+				l.next()
 				code := ""
 				for i := 0; i < 8; i++ {
 					c := l.peek()
-					l.pos++
 					if !isHexDigit(c) {
 						return l.errorf("unfinished unicode escape")
 					}
+					l.next()
 					code = code + string(c)
 				}
-				l.pos--
 				intcode, err := strconv.ParseInt(code, 16, 64)
 				if err != nil {
 					return l.errorf("invalid unicode escape: \\U" + code)
@@ -447,10 +446,11 @@ func (l *tomlLexer) lexString() tomlLexStateFn {
 			if 0x00 <= r && r <= 0x1F {
 				return l.errorf("unescaped control character %U", r)
 			}
+			l.next()
 			growingString += string(r)
 		}
 
-		if l.next() == eof {
+		if l.peek() == eof {
 			break
 		}
 	}
@@ -459,12 +459,11 @@ func (l *tomlLexer) lexString() tomlLexStateFn {
 }
 
 func (l *tomlLexer) lexKeyGroup() tomlLexStateFn {
-	l.ignore()
-	l.pos++
+	l.next()
 
 	if l.peek() == '[' {
 		// token '[[' signifies an array of anonymous key groups
-		l.pos++
+		l.next()
 		l.emit(tokenDoubleLeftBracket)
 		return l.lexInsideKeyGroupArray
 	}
@@ -474,86 +473,85 @@ func (l *tomlLexer) lexKeyGroup() tomlLexStateFn {
 }
 
 func (l *tomlLexer) lexInsideKeyGroupArray() tomlLexStateFn {
-	for {
-		if l.peek() == ']' {
-			if l.pos > l.start {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if len(l.buffer) > 0 {
 				l.emit(tokenKeyGroupArray)
 			}
-			l.ignore()
-			l.pos++
+			l.next()
 			if l.peek() != ']' {
-				break // error
+				break
 			}
-			l.pos++
+			l.next()
 			l.emit(tokenDoubleRightBracket)
 			return l.lexVoid
-		} else if l.peek() == '[' {
+		case '[':
 			return l.errorf("group name cannot contain ']'")
-		}
-
-		if l.next() == eof {
-			break
+		default:
+			l.next()
 		}
 	}
 	return l.errorf("unclosed key group array")
 }
 
 func (l *tomlLexer) lexInsideKeyGroup() tomlLexStateFn {
-	for {
-		if l.peek() == ']' {
-			if l.pos > l.start {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if len(l.buffer) > 0 {
 				l.emit(tokenKeyGroup)
 			}
-			l.ignore()
-			l.pos++
+			l.next()
 			l.emit(tokenRightBracket)
 			return l.lexVoid
-		} else if l.peek() == '[' {
+		case '[':
 			return l.errorf("group name cannot contain ']'")
-		}
-
-		if l.next() == eof {
-			break
+		default:
+			l.next()
 		}
 	}
 	return l.errorf("unclosed key group")
}
 
 func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
-	l.ignore()
-	l.pos++
+	l.next()
 	l.emit(tokenRightBracket)
 	return l.lexRvalue
 }
 
 func (l *tomlLexer) lexNumber() tomlLexStateFn {
-	l.ignore()
-	if !l.accept("+") {
-		l.accept("-")
+	r := l.peek()
+	if r == '+' || r == '-' {
+		l.next()
 	}
 	pointSeen := false
 	expSeen := false
 	digitSeen := false
 	for {
-		next := l.next()
+		next := l.peek()
 		if next == '.' {
 			if pointSeen {
 				return l.errorf("cannot have two dots in one float")
 			}
+			l.next()
 			if !isDigit(l.peek()) {
 				return l.errorf("float cannot end with a dot")
 			}
 			pointSeen = true
 		} else if next == 'e' || next == 'E' {
 			expSeen = true
-			if !l.accept("+") {
-				l.accept("-")
+			l.next()
+			r := l.peek()
+			if r == '+' || r == '-' {
+				l.next()
 			}
 		} else if isDigit(next) {
 			digitSeen = true
+			l.next()
 		} else if next == '_' {
+			l.next()
 		} else {
-			l.backup()
 			break
 		}
 		if pointSeen && !digitSeen {
@@ -572,17 +570,27 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
 	return l.lexRvalue
 }
 
+func (l *tomlLexer) run() {
+	for state := l.lexVoid; state != nil; {
+		state = state()
+	}
+	close(l.tokens)
+}
+
 func init() {
 	dateRegexp = regexp.MustCompile("^\\d{1,4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d{1,9})?(Z|[+-]\\d{2}:\\d{2})")
 }
 
 // Entry point
-func lexToml(input string) chan token {
+func lexToml(input io.Reader) chan token {
+	bufferedInput := buffruneio.NewReader(input)
 	l := &tomlLexer{
-		input:  input,
+		input:         bufferedInput,
 		tokens:        make(chan token),
 		line:          1,
 		col:           1,
+		endbufferLine: 1,
+		endbufferCol:  1,
 	}
 	go l.run()
 	return l.tokens
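For context: the rewritten lexer leans on exactly three buffruneio operations, as exercised above — ReadRune, UnreadRune, and Peek. A minimal sketch of that read/peek pattern, assuming only the call shapes visible in this diff (the lexer's eof-rune handling is omitted):

package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-buffruneio"
)

func main() {
	rd := buffruneio.NewReader(strings.NewReader("a = 42"))

	// Peek returns upcoming runes without consuming them; lexRvalue
	// uses a 35-rune peek as a bounded lookahead for the date regexp.
	fmt.Println(string(rd.Peek(6))) // "a = 42"

	// ReadRune consumes one rune and UnreadRune pushes it back;
	// the lexer's peek() and follow() are built from this pair.
	r, err := rd.ReadRune()
	if err != nil {
		panic(err)
	}
	rd.UnreadRune()
	fmt.Printf("still unconsumed: %c\n", r) // a
}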
+6 -2
@@ -1,15 +1,19 @@
 package toml
 
-import "testing"
+import (
+	"strings"
+	"testing"
+)
 
 func testFlow(t *testing.T, input string, expectedFlow []token) {
-	ch := lexToml(input)
+	ch := lexToml(strings.NewReader(input))
 	for _, expected := range expectedFlow {
 		token := <-ch
 		if token != expected {
 			t.Log("While testing: ", input)
 			t.Log("compared (got)", token, "to (expected)", expected)
 			t.Log("\tvalue:", token.val, "<->", expected.val)
+			t.Log("\tvalue as bytes:", []byte(token.val), "<->", []byte(expected.val))
 			t.Log("\ttype:", token.typ.String(), "<->", expected.typ.String())
 			t.Log("\tline:", token.Line, "<->", expected.Line)
 			t.Log("\tcolumn:", token.Col, "<->", expected.Col)
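A hypothetical test built on this harness, to show the call shape. The token fields (Position, typ, val) and the tokenKey/tokenEqual/tokenTrue/tokenEOF types are the ones used in the lexer diff above; the expected columns follow the new endbuffer-based counting, under which the EOF position after "a = true" lands at column 9:

func TestKeyEqualTrueExample(t *testing.T) {
	testFlow(t, "a = true", []token{
		{Position{1, 1}, tokenKey, "a"},
		{Position{1, 3}, tokenEqual, "="},
		{Position{1, 5}, tokenTrue, "true"},
		{Position{1, 9}, tokenEOF, ""},
	})
}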
+2 -2
@@ -287,7 +287,7 @@ func TestArrayNestedStrings(t *testing.T) {
 
 func TestMissingValue(t *testing.T) {
 	_, err := Load("a = ")
-	if err.Error() != "(1, 4): expecting a value" {
+	if err.Error() != "(1, 5): expecting a value" {
 		t.Error("Bad error message:", err.Error())
 	}
 }
@@ -441,7 +441,7 @@ func TestImplicitDeclarationBefore(t *testing.T) {
 
 func TestFloatsWithoutLeadingZeros(t *testing.T) {
 	_, err := Load("a = .42")
-	if err.Error() != "(1, 4): cannot start float with a dot" {
+	if err.Error() != "(1, 5): cannot start float with a dot" {
 		t.Error("Bad error message:", err.Error())
 	}
+2
@@ -19,6 +19,8 @@ function git_clone() {
 	popd
 }
 
+go get github.com/pelletier/go-buffruneio
+
 # get code for BurntSushi TOML validation
 # pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
 git_clone github.com/BurntSushi/toml master HEAD
+15 -11
@@ -3,7 +3,8 @@ package toml
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
+	"os"
 	"runtime"
 	"strconv"
 	"strings"
@@ -360,8 +361,8 @@ func (t *TomlTree) ToString() string {
 	return t.toToml("", "")
 }
 
-// Load creates a TomlTree from a string.
-func Load(content string) (tree *TomlTree, err error) {
+// LoadReader creates a TomlTree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
 	defer func() {
 		if r := recover(); r != nil {
 			if _, ok := r.(runtime.Error); ok {
@@ -370,18 +371,21 @@ func Load(content string) (tree *TomlTree, err error) {
 			err = errors.New(r.(string))
 		}
 	}()
-	tree = parseToml(lexToml(content))
+	tree = parseToml(lexToml(reader))
 	return
 }
 
+// Load creates a TomlTree from a string.
+func Load(content string) (tree *TomlTree, err error) {
+	return LoadReader(strings.NewReader(content))
+}
+
 // LoadFile creates a TomlTree from a file.
 func LoadFile(path string) (tree *TomlTree, err error) {
-	buff, ferr := ioutil.ReadFile(path)
-	if ferr != nil {
-		err = ferr
-	} else {
-		s := string(buff)
-		tree, err = Load(s)
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
 	}
-	return
+	defer file.Close()
+	return LoadReader(file)
 }