Compare commits

...

48 Commits

Author SHA1 Message Date
Thomas Pelletier 3102b98900 Update to TOML v0.4.0 2015-11-03 16:07:50 +01:00
Thomas Pelletier f0cae62430 Merge pull request #46 from pelletier/pelletier/inline-tables
Implement inline tables
2015-11-03 16:05:32 +01:00
Thomas Pelletier 56c6106477 Specify point versions in Travis 2015-09-10 09:51:39 +01:00
Thomas Pelletier 7d69e5a5c5 Tests for erroneous inline tables 2015-09-09 17:40:27 +01:00
Thomas Pelletier 07d0c2e4d3 Merge branch 'master' into pelletier/inline-tables 2015-09-09 17:35:03 +01:00
Thomas Pelletier 6b9002d8f9 Harden tests for bad arrays 2015-09-09 17:33:28 +01:00
Thomas Pelletier 5753e884d0 Fix floating points with underscores 2015-09-09 17:17:08 +01:00
Thomas Pelletier d467309bdd Add comment to justify this madness 2015-09-09 17:04:36 +01:00
Thomas Pelletier 821a80e635 Add removed test 2015-09-09 17:01:05 +01:00
Thomas Pelletier dd4c4ffc2b Implement inline tables 2015-09-09 16:56:18 +01:00
Thomas Pelletier da703daafe Add go 1.5 to tested versions 2015-08-19 10:24:53 -07:00
Thomas Pelletier f58048cec0 Merge pull request #39 from pelletier/pelletier/integers_underscores
Accept underscores in integers
2015-07-17 16:54:19 -07:00
Thomas Pelletier 440592fa85 Merge pull request #40 from pelletier/pelletier/space-in-keys
Accept spaces in keys
2015-07-17 16:53:53 -07:00
Thomas Pelletier f4f2456dcd Merge pull request #38 from pelletier/pelletier/multiline
Reject full 00 - 1F unicode range
2015-07-17 16:52:59 -07:00
Thomas Pelletier a77f30ea80 Add coveralls badge to readme 2015-07-16 23:55:56 -07:00
Thomas Pelletier d61c80733b Add goveralls 2015-07-16 23:51:41 -07:00
Thomas Pelletier 894e775e38 Accept spaces in keys 2015-07-16 23:04:13 -07:00
Thomas Pelletier 8e75093380 Accept underscores in integers 2015-07-16 22:07:16 -07:00
Thomas Pelletier cf5ad6a245 Fixes #27: Reject full 00 - 1F unicode range 2015-07-16 21:54:10 -07:00
Thomas Pelletier 8fc7451ffc Merge pull request #37 from pelletier/pelletier/better_keys_parsing
Update keys parsing
2015-07-16 17:47:46 -07:00
Thomas Pelletier 9defd66d3c Parse datetimes in UTC 2015-07-15 10:58:08 -07:00
Thomas Pelletier 6adf8057ed Use the new Travis container infrastructure
http://docs.travis-ci.com/user/migrating-from-legacy/#Why-migrate-to-container-based-infrastructure%3F
2015-07-15 09:12:52 -07:00
Thomas Pelletier 36e1197190 Test datetimes differently 2015-07-15 08:17:28 -07:00
Thomas Pelletier 6dd2de38a9 We have been in 2015 for quite a while now 2015-07-14 20:18:44 -07:00
Thomas Pelletier 209315c2af Fixes #35: Retrieve dotted keys 2015-07-14 20:15:02 -07:00
Thomas Pelletier 41a8959f14 Reject new lines in keys 2015-07-14 20:07:43 -07:00
Thomas Pelletier 16a681db2a Allow numbers in keys parsing 2015-07-14 19:56:28 -07:00
Thomas Pelletier 9f36448571 Basic keys parsing 2015-07-14 16:33:33 -07:00
Thomas Pelletier 222e90a7d3 Parse long unicode 2015-05-21 18:52:26 -07:00
Thomas Pelletier a8327d781a Specifiy timezone name 2015-04-23 15:42:25 -07:00
Thomas Pelletier 61449e9d32 Test for Go 1.4.1 2015-04-23 15:36:06 -07:00
Thomas Pelletier 48c977fb58 Test for golang 1.4 2015-04-23 15:33:31 -07:00
Thomas Pelletier 42e7853ef6 Merge pull request #34 from pelletier/issue-29
Changes to support #29 - Support multi-line literal strings
2015-02-27 14:48:13 +01:00
eanderton 1f3d0e03c3 Changes to support #29 - Support multi-line literal strings
* Added error output to test_program.go
* Added multi-line literal string support to lexer
* Added multi-line string supprt to lexer
* Added unit-test for new string support
* Modified test.sh to take an optional parameter to run an individual BurntSushi test suite.
* Fixed formatting
2015-02-26 18:03:30 -05:00
Thomas Pelletier 36d65b681a Merge branch 'toml-0.3.1' 2014-12-06 15:27:39 +01:00
Thomas Pelletier a56707c85f Fixes #28 : Support of literal strings 2014-12-06 15:23:37 +01:00
Thomas Pelletier 4b47f52cb0 Fixes #31 : Use RFC 3339 for datetimes 2014-12-06 15:00:24 +01:00
Thomas Pelletier 2f2f28631b Fixes #32 : Ensure keys are correctly parsed 2014-12-06 14:16:42 +01:00
Thomas Pelletier 543444f747 Fixes #30: Implement exp notation in floats 2014-12-06 13:56:27 +01:00
Thomas Pelletier b814e1a94f Merge pull request #25 from vektra/master
Make it possible to use lib to make new Toml Trees
2014-11-05 19:08:21 +01:00
Evan Phoenix 1fe62f3000 Merge remote-tracking branch 'prim/master'
Conflicts:
	match_test.go
	queryparser.go
2014-11-05 09:52:03 -08:00
Evan Phoenix 709382e9c1 Fix usage on 32bit machines 2014-11-05 09:24:08 -08:00
Evan Phoenix 71e7762db5 Don't wrap native types in a tomlValue{} 2014-11-05 09:23:41 -08:00
Evan Phoenix 34da10d880 Report the type and value that generated the error 2014-11-05 09:23:28 -08:00
Thomas Pelletier db15f8a481 Merge pull request #24 from pelletier/pelletier/integer_overflow
Int overflow in queryparser
2014-11-03 22:09:12 +01:00
Evan Phoenix 8ef71920bd Expose ability to make an empty tree and handle raw values 2014-10-28 11:49:50 -07:00
Evan Phoenix fa055bcbba Fix inserting values into a tree 2014-10-28 11:49:14 -07:00
Thomas Pelletier 7337a63f5a Use MaxInt instead of MaxInt64 for ints
This is causing an integer overflow on 386 go builds, because ints are
int32 and not int64 on this platform.
2014-10-16 05:58:50 -07:00
14 changed files with 928 additions and 119 deletions
+11 -3
View File
@@ -1,7 +1,15 @@
language: go language: go
script: "./test.sh" script: "./test.sh"
sudo: false
go: go:
- 1.1 - 1.2.2
- 1.2 - 1.3.3
- 1.3 - 1.4.2
- 1.5.1
- tip - tip
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
after_success:
- $HOME/gopath/bin/goveralls -service=travis-ci
+3 -2
View File
@@ -3,10 +3,11 @@
Go library for the [TOML](https://github.com/mojombo/toml) format. Go library for the [TOML](https://github.com/mojombo/toml) format.
This library supports TOML version This library supports TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) [![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
[![Coverage Status](https://coveralls.io/repos/pelletier/go-toml/badge.svg?branch=master&service=github)](https://coveralls.io/github/pelletier/go-toml?branch=master)
## Features ## Features
@@ -97,7 +98,7 @@ You can run both of them using `./test.sh`.
## License ## License
Copyright (c) 2013, 2014 Thomas Pelletier, Eric Anderton Copyright (c) 2013 - 2015 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy of Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in this software and associated documentation files (the "Software"), to deal in
+3
View File
@@ -13,10 +13,12 @@ import (
func main() { func main() {
bytes, err := ioutil.ReadAll(os.Stdin) bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil { if err != nil {
log.Fatalf("Error during TOML read: %s", err)
os.Exit(2) os.Exit(2)
} }
tree, err := toml.Load(string(bytes)) tree, err := toml.Load(string(bytes))
if err != nil { if err != nil {
log.Fatalf("Error during TOML load: %s", err)
os.Exit(1) os.Exit(1)
} }
@@ -24,6 +26,7 @@ func main() {
if err := json.NewEncoder(os.Stdout).Encode(typedTree); err != nil { if err := json.NewEncoder(os.Stdout).Encode(typedTree); err != nil {
log.Fatalf("Error encoding JSON: %s", err) log.Fatalf("Error encoding JSON: %s", err)
os.Exit(3)
} }
os.Exit(0) os.Exit(0)
+78
View File
@@ -0,0 +1,78 @@
// Parsing keys handling both bare and quoted keys.
package toml
import (
"bytes"
"fmt"
"unicode"
)
// parseKey splits a TOML key into its dot-separated components, handling
// both bare keys (a.b.c) and quoted keys ("a.b".c). Spaces around
// components are skipped; inside a bare key a space may only be followed
// by a dot. Returns the list of components, or an error for malformed keys.
func parseKey(key string) ([]string, error) {
	groups := []string{}
	var buffer bytes.Buffer
	inQuotes := false
	escapeNext := false
	ignoreSpace := true // skipping leading spaces of the next component
	expectDot := false  // set after a space in a bare key: only '.' may follow
	for _, char := range key {
		if ignoreSpace {
			if char == ' ' {
				continue
			}
			ignoreSpace = false
		}
		if escapeNext {
			buffer.WriteRune(char)
			escapeNext = false
			continue
		}
		switch char {
		case '\\':
			escapeNext = true
			continue
		case '"':
			inQuotes = !inQuotes
			expectDot = false
		case '.':
			if inQuotes {
				buffer.WriteRune(char)
			} else {
				// end of a component: flush it and start the next one
				groups = append(groups, buffer.String())
				buffer.Reset()
				ignoreSpace = true
				expectDot = false
			}
		case ' ':
			if inQuotes {
				buffer.WriteRune(char)
			} else {
				expectDot = true
			}
		default:
			if !inQuotes && !isValidBareChar(char) {
				return nil, fmt.Errorf("invalid bare character: %c", char)
			}
			if !inQuotes && expectDot {
				// was fmt.Errorf("what?") — a placeholder; say what went wrong
				return nil, fmt.Errorf("invalid key: expected '.' after whitespace, not %c", char)
			}
			buffer.WriteRune(char)
			expectDot = false
		}
	}
	if inQuotes {
		return nil, fmt.Errorf("mismatched quotes")
	}
	if escapeNext {
		return nil, fmt.Errorf("unfinished escape sequence")
	}
	if buffer.Len() > 0 {
		groups = append(groups, buffer.String())
	}
	return groups, nil
}
// isValidBareChar reports whether r may appear in an unquoted (bare) key.
func isValidBareChar(r rune) bool {
	switch {
	case r == '-':
		return true
	case unicode.IsNumber(r):
		return true
	default:
		return isAlphanumeric(r)
	}
}
+44
View File
@@ -0,0 +1,44 @@
package toml
import (
"fmt"
"testing"
)
// testResult asserts that parsing key yields exactly the expected components.
func testResult(t *testing.T, key string, expected []string) {
	parsed, err := parseKey(key)
	if err != nil {
		t.Fatal("Unexpected error:", err)
	}
	if len(parsed) != len(expected) {
		t.Fatal("Expected length", len(expected), "but", len(parsed), "parsed")
	}
	for i := range expected {
		if parsed[i] != expected[i] {
			t.Fatal("Expected", expected[i], "at index", i, "but found", parsed[i])
		}
	}
}
// testError asserts that parsing key fails with exactly expectedError.
func testError(t *testing.T, key string, expectedError string) {
	_, err := parseKey(key)
	got := fmt.Sprintf("%s", err) // tolerate nil err without panicking
	if got != expectedError {
		t.Fatalf("Expected error \"%s\", but got \"%s\".", expectedError, got)
	}
}
// TestBareKeyBasic parses a single bare key.
func TestBareKeyBasic(t *testing.T) {
	key := "test"
	testResult(t, key, []string{key})
}
// TestBareKeyDotted parses a dotted bare key into its components.
func TestBareKeyDotted(t *testing.T) {
	expected := []string{"this", "is", "a", "key"}
	testResult(t, "this.is.a.key", expected)
}
// TestDottedKeyBasic parses a quoted key whose dots are literal.
func TestDottedKeyBasic(t *testing.T) {
	testResult(t, `"a.dotted.key"`, []string{"a.dotted.key"})
}
// TestBaseKeyPound verifies that '#' is rejected in a bare key.
func TestBaseKeyPound(t *testing.T) {
	const key = "hello#world"
	testError(t, key, "invalid bare character: #")
}
+172 -52
View File
@@ -138,7 +138,7 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
return l.lexRvalue return l.lexRvalue
} }
if isKeyChar(next) { if isKeyStartChar(next) {
return l.lexKey return l.lexKey
} }
@@ -158,17 +158,23 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
case '.': case '.':
return l.errorf("cannot start float with a dot") return l.errorf("cannot start float with a dot")
case '=': case '=':
return l.errorf("cannot have multiple equals for the same key") return l.lexEqual
case '[': case '[':
l.depth++ l.depth++
return l.lexLeftBracket return l.lexLeftBracket
case ']': case ']':
l.depth-- l.depth--
return l.lexRightBracket return l.lexRightBracket
case '{':
return l.lexLeftCurlyBrace
case '}':
return l.lexRightCurlyBrace
case '#': case '#':
return l.lexComment return l.lexComment
case '"': case '"':
return l.lexString return l.lexString
case '\'':
return l.lexLiteralString
case ',': case ',':
return l.lexComma return l.lexComma
case '\n': case '\n':
@@ -192,7 +198,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
return l.lexKey return l.lexKey
} }
if dateRegexp.FindString(l.input[l.pos:]) != "" { dateMatch := dateRegexp.FindString(l.input[l.pos:])
if dateMatch != "" {
l.ignore()
l.pos += len(dateMatch)
return l.lexDate return l.lexDate
} }
@@ -213,9 +222,21 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
return nil return nil
} }
func (l *tomlLexer) lexDate() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
l.ignore() l.ignore()
l.pos += 20 // Fixed size of a date in TOML l.pos++
l.emit(tokenLeftCurlyBrace)
return l.lexRvalue
}
func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
l.ignore()
l.pos++
l.emit(tokenRightCurlyBrace)
return l.lexRvalue
}
func (l *tomlLexer) lexDate() tomlLexStateFn {
l.emit(tokenDate) l.emit(tokenDate)
return l.lexRvalue return l.lexRvalue
} }
@@ -250,7 +271,17 @@ func (l *tomlLexer) lexComma() tomlLexStateFn {
func (l *tomlLexer) lexKey() tomlLexStateFn { func (l *tomlLexer) lexKey() tomlLexStateFn {
l.ignore() l.ignore()
for isKeyChar(l.next()) { inQuotes := false
for r := l.next(); isKeyChar(r) || r == '\n'; r = l.next() {
if r == '"' {
inQuotes = !inQuotes
} else if r == '\n' {
return l.errorf("keys cannot contain new lines")
} else if isSpace(r) && !inQuotes {
break
} else if !isValidBareChar(r) && !inQuotes {
return l.errorf("keys cannot contain %c character", r)
}
} }
l.backup() l.backup()
l.emit(tokenKey) l.emit(tokenKey)
@@ -275,65 +306,146 @@ func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
return l.lexRvalue return l.lexRvalue
} }
// lexLiteralString scans a single-quoted literal string (no escape
// processing) and emits it as a tokenString. Both the one-quote (') and
// the triple-quote (''') multiline forms are handled.
func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
	l.pos++ // consume the opening quote
	l.ignore()
	growingString := ""
	// handle special case for triple-quote
	terminator := "'"
	if l.follow("''") {
		l.pos += 2
		l.ignore()
		terminator = "'''"
		// special case: discard leading newline
		if l.peek() == '\n' {
			l.pos++
			l.ignore()
		}
	}
	// find end of string: copy characters verbatim until the terminator
	for {
		if l.follow(terminator) {
			l.emitWithValue(tokenString, growingString)
			l.pos += len(terminator)
			l.ignore()
			return l.lexRvalue
		}
		growingString += string(l.peek())
		if l.next() == eof {
			break
		}
	}
	// reached EOF before the closing quote(s)
	return l.errorf("unclosed string")
}
func (l *tomlLexer) lexString() tomlLexStateFn { func (l *tomlLexer) lexString() tomlLexStateFn {
l.pos++ l.pos++
l.ignore() l.ignore()
growingString := "" growingString := ""
for { // handle special case for triple-quote
if l.peek() == '"' { terminator := "\""
l.emitWithValue(tokenString, growingString) if l.follow("\"\"") {
l.pos += 2
l.ignore()
terminator = "\"\"\""
// special case: discard leading newline
if l.peek() == '\n' {
l.pos++ l.pos++
l.ignore() l.ignore()
}
}
for {
if l.follow(terminator) {
l.emitWithValue(tokenString, growingString)
l.pos += len(terminator)
l.ignore()
return l.lexRvalue return l.lexRvalue
} }
if l.follow("\\\"") { if l.follow("\\") {
l.pos++ l.pos++
growingString += "\"" switch l.peek() {
} else if l.follow("\\n") { case '\r':
l.pos++ fallthrough
growingString += "\n" case '\n':
} else if l.follow("\\b") { fallthrough
l.pos++ case '\t':
growingString += "\b" fallthrough
} else if l.follow("\\f") { case ' ':
l.pos++ // skip all whitespace chars following backslash
growingString += "\f"
} else if l.follow("\\/") {
l.pos++
growingString += "/"
} else if l.follow("\\t") {
l.pos++
growingString += "\t"
} else if l.follow("\\r") {
l.pos++
growingString += "\r"
} else if l.follow("\\\\") {
l.pos++
growingString += "\\"
} else if l.follow("\\u") {
l.pos += 2
code := ""
for i := 0; i < 4; i++ {
c := l.peek()
l.pos++ l.pos++
if !isHexDigit(c) { for strings.ContainsRune("\r\n\t ", l.peek()) {
return l.errorf("unfinished unicode escape") l.pos++
} }
code = code + string(c) l.pos--
case '"':
growingString += "\""
case 'n':
growingString += "\n"
case 'b':
growingString += "\b"
case 'f':
growingString += "\f"
case '/':
growingString += "/"
case 't':
growingString += "\t"
case 'r':
growingString += "\r"
case '\\':
growingString += "\\"
case 'u':
l.pos++
code := ""
for i := 0; i < 4; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
case 'U':
l.pos++
code := ""
for i := 0; i < 8; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 64)
if err != nil {
return l.errorf("invalid unicode escape: \\U" + code)
}
growingString += string(rune(intcode))
default:
return l.errorf("invalid escape sequence: \\" + string(l.peek()))
} }
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
} else if l.follow("\\") {
l.pos++
return l.errorf("invalid escape sequence: \\" + string(l.peek()))
} else { } else {
growingString += string(l.peek()) r := l.peek()
if 0x00 <= r && r <= 0x1F {
return l.errorf("unescaped control character %U", r)
}
growingString += string(r)
} }
if l.next() == eof { if l.next() == eof {
@@ -418,6 +530,7 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
l.accept("-") l.accept("-")
} }
pointSeen := false pointSeen := false
expSeen := false
digitSeen := false digitSeen := false
for { for {
next := l.next() next := l.next()
@@ -429,8 +542,15 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
return l.errorf("float cannot end with a dot") return l.errorf("float cannot end with a dot")
} }
pointSeen = true pointSeen = true
} else if next == 'e' || next == 'E' {
expSeen = true
if !l.accept("+") {
l.accept("-")
}
} else if isDigit(next) { } else if isDigit(next) {
digitSeen = true digitSeen = true
} else if next == '_' {
l.pos++
} else { } else {
l.backup() l.backup()
break break
@@ -443,7 +563,7 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
if !digitSeen { if !digitSeen {
return l.errorf("no digit in that number") return l.errorf("no digit in that number")
} }
if pointSeen { if pointSeen || expSeen {
l.emit(tokenFloat) l.emit(tokenFloat)
} else { } else {
l.emit(tokenInteger) l.emit(tokenInteger)
@@ -452,7 +572,7 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
} }
func init() { func init() {
dateRegexp = regexp.MustCompile("^\\d{1,4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z") dateRegexp = regexp.MustCompile("^\\d{1,4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d{1,9})?(Z|[+-]\\d{2}:\\d{2})")
} }
// Entry point // Entry point
+220 -19
View File
@@ -8,11 +8,12 @@ func testFlow(t *testing.T, input string, expectedFlow []token) {
token := <-ch token := <-ch
if token != expected { if token != expected {
t.Log("While testing: ", input) t.Log("While testing: ", input)
t.Log("compared (got)", token, "to (expected)", expected)
t.Log("\tvalue:", token.val, "<->", expected.val)
t.Log("\ttype:", token.typ.String(), "<->", expected.typ.String())
t.Log("\tline:", token.Line, "<->", expected.Line)
t.Log("\tcolumn:", token.Col, "<->", expected.Col)
t.Log("compared", token, "to", expected) t.Log("compared", token, "to", expected)
t.Log(token.val, "<->", expected.val)
t.Log(token.typ, "<->", expected.typ)
t.Log(token.Line, "<->", expected.Line)
t.Log(token.Col, "<->", expected.Col)
t.FailNow() t.FailNow()
} }
} }
@@ -39,6 +40,15 @@ func TestValidKeyGroup(t *testing.T) {
}) })
} }
func TestNestedQuotedUnicodeKeyGroup(t *testing.T) {
testFlow(t, `[ j . "ʞ" . l ]`, []token{
token{Position{1, 1}, tokenLeftBracket, "["},
token{Position{1, 2}, tokenKeyGroup, ` j . "ʞ" . l `},
token{Position{1, 15}, tokenRightBracket, "]"},
token{Position{1, 16}, tokenEOF, ""},
})
}
func TestUnclosedKeyGroup(t *testing.T) { func TestUnclosedKeyGroup(t *testing.T) {
testFlow(t, "[hello world", []token{ testFlow(t, "[hello world", []token{
token{Position{1, 1}, tokenLeftBracket, "["}, token{Position{1, 1}, tokenLeftBracket, "["},
@@ -118,19 +128,13 @@ func TestBasicKeyAndEqual(t *testing.T) {
func TestKeyWithSharpAndEqual(t *testing.T) { func TestKeyWithSharpAndEqual(t *testing.T) {
testFlow(t, "key#name = 5", []token{ testFlow(t, "key#name = 5", []token{
token{Position{1, 1}, tokenKey, "key#name"}, token{Position{1, 1}, tokenError, "keys cannot contain # character"},
token{Position{1, 10}, tokenEqual, "="},
token{Position{1, 12}, tokenInteger, "5"},
token{Position{1, 13}, tokenEOF, ""},
}) })
} }
func TestKeyWithSymbolsAndEqual(t *testing.T) { func TestKeyWithSymbolsAndEqual(t *testing.T) {
testFlow(t, "~!@#$^&*()_+-`1234567890[]\\|/?><.,;:' = 5", []token{ testFlow(t, "~!@$^&*()_+-`1234567890[]\\|/?><.,;:' = 5", []token{
token{Position{1, 1}, tokenKey, "~!@#$^&*()_+-`1234567890[]\\|/?><.,;:'"}, token{Position{1, 1}, tokenError, "keys cannot contain ~ character"},
token{Position{1, 39}, tokenEqual, "="},
token{Position{1, 41}, tokenInteger, "5"},
token{Position{1, 42}, tokenEOF, ""},
}) })
} }
@@ -276,7 +280,13 @@ func TestKeyEqualArrayBoolsWithComments(t *testing.T) {
func TestDateRegexp(t *testing.T) { func TestDateRegexp(t *testing.T) {
if dateRegexp.FindString("1979-05-27T07:32:00Z") == "" { if dateRegexp.FindString("1979-05-27T07:32:00Z") == "" {
t.Fail() t.Error("basic lexing")
}
if dateRegexp.FindString("1979-05-27T00:32:00-07:00") == "" {
t.Error("offset lexing")
}
if dateRegexp.FindString("1979-05-27T00:32:00.999999-07:00") == "" {
t.Error("nano precision lexing")
} }
} }
@@ -287,6 +297,18 @@ func TestKeyEqualDate(t *testing.T) {
token{Position{1, 7}, tokenDate, "1979-05-27T07:32:00Z"}, token{Position{1, 7}, tokenDate, "1979-05-27T07:32:00Z"},
token{Position{1, 27}, tokenEOF, ""}, token{Position{1, 27}, tokenEOF, ""},
}) })
testFlow(t, "foo = 1979-05-27T00:32:00-07:00", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 7}, tokenDate, "1979-05-27T00:32:00-07:00"},
token{Position{1, 32}, tokenEOF, ""},
})
testFlow(t, "foo = 1979-05-27T00:32:00.999999-07:00", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 7}, tokenDate, "1979-05-27T00:32:00.999999-07:00"},
token{Position{1, 39}, tokenEOF, ""},
})
} }
func TestFloatEndingWithDot(t *testing.T) { func TestFloatEndingWithDot(t *testing.T) {
@@ -305,11 +327,48 @@ func TestFloatWithTwoDots(t *testing.T) {
}) })
} }
func TestDoubleEqualKey(t *testing.T) { func TestFloatWithExponent1(t *testing.T) {
testFlow(t, "foo= = 2", []token{ testFlow(t, "a = 5e+22", []token{
token{Position{1, 1}, tokenKey, "foo"}, token{Position{1, 1}, tokenKey, "a"},
token{Position{1, 4}, tokenEqual, "="}, token{Position{1, 3}, tokenEqual, "="},
token{Position{1, 5}, tokenError, "cannot have multiple equals for the same key"}, token{Position{1, 5}, tokenFloat, "5e+22"},
token{Position{1, 10}, tokenEOF, ""},
})
}
// TestFloatWithExponent2 lexes a float with an uppercase exponent.
func TestFloatWithExponent2(t *testing.T) {
	input := "a = 5E+22"
	testFlow(t, input, []token{
		{Position{1, 1}, tokenKey, "a"},
		{Position{1, 3}, tokenEqual, "="},
		{Position{1, 5}, tokenFloat, "5E+22"},
		{Position{1, 10}, tokenEOF, ""},
	})
}
// TestFloatWithExponent3 lexes a negative float with a positive exponent.
func TestFloatWithExponent3(t *testing.T) {
	input := "a = -5e+22"
	testFlow(t, input, []token{
		{Position{1, 1}, tokenKey, "a"},
		{Position{1, 3}, tokenEqual, "="},
		{Position{1, 5}, tokenFloat, "-5e+22"},
		{Position{1, 11}, tokenEOF, ""},
	})
}
// TestFloatWithExponent4 lexes a negative float with a negative exponent.
func TestFloatWithExponent4(t *testing.T) {
	input := "a = -5e-22"
	testFlow(t, input, []token{
		{Position{1, 1}, tokenKey, "a"},
		{Position{1, 3}, tokenEqual, "="},
		{Position{1, 5}, tokenFloat, "-5e-22"},
		{Position{1, 11}, tokenEOF, ""},
	})
}
func TestFloatWithExponent5(t *testing.T) {
testFlow(t, "a = 6.626e-34", []token{
token{Position{1, 1}, tokenKey, "a"},
token{Position{1, 3}, tokenEqual, "="},
token{Position{1, 5}, tokenFloat, "6.626e-34"},
token{Position{1, 14}, tokenEOF, ""},
}) })
} }
@@ -377,6 +436,34 @@ func TestKeyEqualNumber(t *testing.T) {
token{Position{1, 7}, tokenFloat, "-4.2"}, token{Position{1, 7}, tokenFloat, "-4.2"},
token{Position{1, 11}, tokenEOF, ""}, token{Position{1, 11}, tokenEOF, ""},
}) })
testFlow(t, "foo = 1_000", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 7}, tokenInteger, "1_000"},
token{Position{1, 12}, tokenEOF, ""},
})
testFlow(t, "foo = 5_349_221", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 7}, tokenInteger, "5_349_221"},
token{Position{1, 16}, tokenEOF, ""},
})
testFlow(t, "foo = 1_2_3_4_5", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 7}, tokenInteger, "1_2_3_4_5"},
token{Position{1, 16}, tokenEOF, ""},
})
testFlow(t, "flt8 = 9_224_617.445_991_228_313", []token{
token{Position{1, 1}, tokenKey, "flt8"},
token{Position{1, 6}, tokenEqual, "="},
token{Position{1, 8}, tokenFloat, "9_224_617.445_991_228_313"},
token{Position{1, 33}, tokenEOF, ""},
})
} }
func TestMultiline(t *testing.T) { func TestMultiline(t *testing.T) {
@@ -398,6 +485,105 @@ func TestKeyEqualStringUnicodeEscape(t *testing.T) {
token{Position{1, 8}, tokenString, "hello ♥"}, token{Position{1, 8}, tokenString, "hello ♥"},
token{Position{1, 21}, tokenEOF, ""}, token{Position{1, 21}, tokenEOF, ""},
}) })
testFlow(t, `foo = "hello \U000003B4"`, []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 8}, tokenString, "hello δ"},
token{Position{1, 25}, tokenEOF, ""},
})
}
// TestKeyEqualStringNoEscape checks that raw (unescaped) control
// characters inside a basic string produce a lexer error.
func TestKeyEqualStringNoEscape(t *testing.T) {
	for _, c := range []struct {
		ctrl rune
		msg  string
	}{
		{'\u0002', "unescaped control character U+0002"},
		{'\u001F', "unescaped control character U+001F"},
	} {
		testFlow(t, "foo = \"hello "+string(c.ctrl)+"\"", []token{
			{Position{1, 1}, tokenKey, "foo"},
			{Position{1, 5}, tokenEqual, "="},
			{Position{1, 8}, tokenError, c.msg},
		})
	}
}
// TestLiteralString lexes single-quoted literal strings: backslashes and
// double quotes inside them are taken verbatim.
func TestLiteralString(t *testing.T) {
	cases := []struct {
		input  string
		value  string
		eofCol int
	}{
		{`foo = 'C:\Users\nodejs\templates'`, `C:\Users\nodejs\templates`, 34},
		{`foo = '\\ServerX\admin$\system32\'`, `\\ServerX\admin$\system32\`, 35},
		{`foo = 'Tom "Dubs" Preston-Werner'`, `Tom "Dubs" Preston-Werner`, 34},
		{`foo = '<\i\c*\s*>'`, `<\i\c*\s*>`, 19},
	}
	for _, c := range cases {
		testFlow(t, c.input, []token{
			{Position{1, 1}, tokenKey, "foo"},
			{Position{1, 5}, tokenEqual, "="},
			{Position{1, 8}, tokenString, c.value},
			{Position{1, c.eofCol}, tokenEOF, ""},
		})
	}
}
// TestMultilineLiteralString lexes triple-single-quoted strings, both on
// one line and spanning several lines (leading newline is discarded).
func TestMultilineLiteralString(t *testing.T) {
	testFlow(t, `foo = '''hello 'literal' world'''`, []token{
		{Position{1, 1}, tokenKey, "foo"},
		{Position{1, 5}, tokenEqual, "="},
		{Position{1, 10}, tokenString, `hello 'literal' world`},
		{Position{1, 34}, tokenEOF, ""},
	})
	testFlow(t, "foo = '''\nhello\n'literal'\nworld'''", []token{
		{Position{1, 1}, tokenKey, "foo"},
		{Position{1, 5}, tokenEqual, "="},
		{Position{2, 1}, tokenString, "hello\n'literal'\nworld"},
		{Position{4, 9}, tokenEOF, ""},
	})
}
func TestMultilineString(t *testing.T) {
testFlow(t, `foo = """hello "literal" world"""`, []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 10}, tokenString, `hello "literal" world`},
token{Position{1, 34}, tokenEOF, ""},
})
testFlow(t, "foo = \"\"\"\nhello\\\n\"literal\"\\\nworld\"\"\"", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{2, 1}, tokenString, "hello\"literal\"world"},
token{Position{4, 9}, tokenEOF, ""},
})
testFlow(t, "foo = \"\"\"\\\n \\\n \\\n hello\\\nmultiline\\\nworld\"\"\"", []token{
token{Position{1, 1}, tokenKey, "foo"},
token{Position{1, 5}, tokenEqual, "="},
token{Position{1, 10}, tokenString, "hellomultilineworld"},
token{Position{6, 9}, tokenEOF, ""},
})
testFlow(t, "key2 = \"\"\"\nThe quick brown \\\n\n\n fox jumps over \\\n the lazy dog.\"\"\"", []token{
token{Position{1, 1}, tokenKey, "key2"},
token{Position{1, 6}, tokenEqual, "="},
token{Position{2, 1}, tokenString, "The quick brown fox jumps over the lazy dog."},
token{Position{6, 21}, tokenEOF, ""},
})
testFlow(t, "key2 = \"\"\"\\\n The quick brown \\\n fox jumps over \\\n the lazy dog.\\\n \"\"\"", []token{
token{Position{1, 1}, tokenKey, "key2"},
token{Position{1, 6}, tokenEqual, "="},
token{Position{1, 11}, tokenString, "The quick brown fox jumps over the lazy dog."},
token{Position{5, 11}, tokenEOF, ""},
})
} }
func TestUnicodeString(t *testing.T) { func TestUnicodeString(t *testing.T) {
@@ -417,3 +603,18 @@ func TestKeyGroupArray(t *testing.T) {
token{Position{1, 8}, tokenEOF, ""}, token{Position{1, 8}, tokenEOF, ""},
}) })
} }
func TestQuotedKey(t *testing.T) {
testFlow(t, "\"a b\" = 42", []token{
token{Position{1, 1}, tokenKey, "\"a b\""},
token{Position{1, 7}, tokenEqual, "="},
token{Position{1, 9}, tokenInteger, "42"},
token{Position{1, 11}, tokenEOF, ""},
})
}
func TestKeyNewline(t *testing.T) {
testFlow(t, "a\n= 4", []token{
token{Position{1, 1}, tokenError, "keys cannot contain new lines"},
})
}
+3 -4
View File
@@ -2,7 +2,6 @@ package toml
import ( import (
"fmt" "fmt"
"math"
"testing" "testing"
) )
@@ -110,7 +109,7 @@ func TestPathSliceStart(t *testing.T) {
assertPath(t, assertPath(t,
"$[123:]", "$[123:]",
buildPath( buildPath(
newMatchSliceFn(123, math.MaxInt64, 1), newMatchSliceFn(123, MaxInt, 1),
)) ))
} }
@@ -134,7 +133,7 @@ func TestPathSliceStartStep(t *testing.T) {
assertPath(t, assertPath(t,
"$[123::7]", "$[123::7]",
buildPath( buildPath(
newMatchSliceFn(123, math.MaxInt64, 7), newMatchSliceFn(123, MaxInt, 7),
)) ))
} }
@@ -150,7 +149,7 @@ func TestPathSliceStep(t *testing.T) {
assertPath(t, assertPath(t,
"$[::7]", "$[::7]",
buildPath( buildPath(
newMatchSliceFn(0, math.MaxInt64, 7), newMatchSliceFn(0, MaxInt, 7),
)) ))
} }
+87 -11
View File
@@ -98,7 +98,10 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
} }
// get or create group array element at the indicated part in the path // get or create group array element at the indicated part in the path
keys := strings.Split(key.val, ".") keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid group array key: %s", err)
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys) destTree := p.tree.GetPath(keys)
var array []*TomlTree var array []*TomlTree
@@ -153,7 +156,10 @@ func (p *tomlParser) parseGroup() tomlParserStateFn {
} }
p.seenGroupKeys = append(p.seenGroupKeys, key.val) p.seenGroupKeys = append(p.seenGroupKeys, key.val)
keys := strings.Split(key.val, ".") keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid group array key: %s", err)
}
if err := p.tree.createSubTree(keys, startToken.Position); err != nil { if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
p.raiseError(key, "%s", err) p.raiseError(key, "%s", err)
} }
@@ -165,6 +171,7 @@ func (p *tomlParser) parseGroup() tomlParserStateFn {
func (p *tomlParser) parseAssign() tomlParserStateFn { func (p *tomlParser) parseAssign() tomlParserStateFn {
key := p.getToken() key := p.getToken()
p.assume(tokenEqual) p.assume(tokenEqual)
value := p.parseRvalue() value := p.parseRvalue()
var groupKey []string var groupKey []string
if len(p.currentGroup) > 0 { if len(p.currentGroup) > 0 {
@@ -186,13 +193,21 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
} }
// assign value to the found group // assign value to the found group
localKey := []string{key.val} keyVals, err := parseKey(key.val)
finalKey := append(groupKey, key.val) if err != nil {
p.raiseError(key, "%s", err)
}
if len(keyVals) != 1 {
p.raiseError(key, "Invalid key")
}
keyVal := keyVals[0]
localKey := []string{keyVal}
finalKey := append(groupKey, keyVal)
if targetNode.GetPath(localKey) != nil { if targetNode.GetPath(localKey) != nil {
p.raiseError(key, "The following key was defined twice: %s", p.raiseError(key, "The following key was defined twice: %s",
strings.Join(finalKey, ".")) strings.Join(finalKey, "."))
} }
targetNode.values[key.val] = &tomlValue{value, key.Position} targetNode.values[keyVal] = &tomlValue{value, key.Position}
return p.parseStart return p.parseStart
} }
@@ -210,25 +225,31 @@ func (p *tomlParser) parseRvalue() interface{} {
case tokenFalse: case tokenFalse:
return false return false
case tokenInteger: case tokenInteger:
val, err := strconv.ParseInt(tok.val, 10, 64) cleanedVal := strings.Replace(tok.val, "_", "", -1)
val, err := strconv.ParseInt(cleanedVal, 10, 64)
if err != nil { if err != nil {
p.raiseError(tok, "%s", err) p.raiseError(tok, "%s", err)
} }
return val return val
case tokenFloat: case tokenFloat:
val, err := strconv.ParseFloat(tok.val, 64) cleanedVal := strings.Replace(tok.val, "_", "", -1)
val, err := strconv.ParseFloat(cleanedVal, 64)
if err != nil { if err != nil {
p.raiseError(tok, "%s", err) p.raiseError(tok, "%s", err)
} }
return val return val
case tokenDate: case tokenDate:
val, err := time.Parse(time.RFC3339, tok.val) val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
if err != nil { if err != nil {
p.raiseError(tok, "%s", err) p.raiseError(tok, "%s", err)
} }
return val return val
case tokenLeftBracket: case tokenLeftBracket:
return p.parseArray() return p.parseArray()
case tokenLeftCurlyBrace:
return p.parseInlineTable()
case tokenEqual:
p.raiseError(tok, "cannot have multiple equals for the same key")
case tokenError: case tokenError:
p.raiseError(tok, "%s", tok) p.raiseError(tok, "%s", tok)
} }
@@ -238,7 +259,51 @@ func (p *tomlParser) parseRvalue() interface{} {
return nil return nil
} }
func (p *tomlParser) parseArray() []interface{} { func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma
}
func (p *tomlParser) parseInlineTable() *TomlTree {
tree := newTomlTree()
var previous *token
Loop:
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated inline table")
}
switch follow.typ {
case tokenRightCurlyBrace:
p.getToken()
break Loop
case tokenKey:
if !tokenIsComma(previous) && previous != nil {
p.raiseError(follow, "comma expected between fields in inline table")
}
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
tree.Set(key.val, value)
case tokenComma:
if previous == nil {
p.raiseError(follow, "inline table cannot start with a comma")
}
if tokenIsComma(previous) {
p.raiseError(follow, "need field between two commas in inline table")
}
p.getToken()
default:
p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String())
}
previous = follow
}
if tokenIsComma(previous) {
p.raiseError(previous, "trailing comma at the end of inline table")
}
return tree
}
func (p *tomlParser) parseArray() interface{} {
var array []interface{} var array []interface{}
arrayType := reflect.TypeOf(nil) arrayType := reflect.TypeOf(nil)
for { for {
@@ -248,7 +313,7 @@ func (p *tomlParser) parseArray() []interface{} {
} }
if follow.typ == tokenRightBracket { if follow.typ == tokenRightBracket {
p.getToken() p.getToken()
return array break
} }
val := p.parseRvalue() val := p.parseRvalue()
if arrayType == nil { if arrayType == nil {
@@ -259,7 +324,7 @@ func (p *tomlParser) parseArray() []interface{} {
} }
array = append(array, val) array = append(array, val)
follow = p.peek() follow = p.peek()
if follow == nil { if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated array") p.raiseError(follow, "unterminated array")
} }
if follow.typ != tokenRightBracket && follow.typ != tokenComma { if follow.typ != tokenRightBracket && follow.typ != tokenComma {
@@ -269,6 +334,17 @@ func (p *tomlParser) parseArray() []interface{} {
p.getToken() p.getToken()
} }
} }
// An array of TomlTrees is actually an array of inline
// tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*TomlTree,
// the two notations would not be equivalent.
if arrayType == reflect.TypeOf(newTomlTree()) {
tomlArray := make([]*TomlTree, len(array))
for i, v := range array {
tomlArray[i] = v.(*TomlTree)
}
return tomlArray
}
return array return array
} }
+195 -5
View File
@@ -51,12 +51,10 @@ func TestSimpleKV(t *testing.T) {
}) })
} }
// NOTE: from the BurntSushi test suite func TestNumberInKey(t *testing.T) {
// NOTE: this test is pure evil due to the embedded '.' tree, err := Load("hello2 = 42")
func TestSpecialKV(t *testing.T) {
tree, err := Load("~!@#$^&*()_+-`1234567890[]\\|/?><.,;: = 1")
assertTree(t, tree, err, map[string]interface{}{ assertTree(t, tree, err, map[string]interface{}{
"~!@#$^&*()_+-`1234567890[]\\|/?><.,;:": int64(1), "hello2": int64(42),
}) })
} }
@@ -70,6 +68,44 @@ func TestSimpleNumbers(t *testing.T) {
}) })
} }
func TestNumbersWithUnderscores(t *testing.T) {
tree, err := Load("a = 1_000")
assertTree(t, tree, err, map[string]interface{}{
"a": int64(1000),
})
tree, err = Load("a = 5_349_221")
assertTree(t, tree, err, map[string]interface{}{
"a": int64(5349221),
})
tree, err = Load("a = 1_2_3_4_5")
assertTree(t, tree, err, map[string]interface{}{
"a": int64(12345),
})
tree, err = Load("flt8 = 9_224_617.445_991_228_313")
assertTree(t, tree, err, map[string]interface{}{
"flt8": float64(9224617.445991228313),
})
tree, err = Load("flt9 = 1e1_00")
assertTree(t, tree, err, map[string]interface{}{
"flt9": float64(1e100),
})
}
func TestFloatsWithExponents(t *testing.T) {
tree, err := Load("a = 5e+22\nb = 5E+22\nc = -5e+22\nd = -5e-22\ne = 6.626e-34")
assertTree(t, tree, err, map[string]interface{}{
"a": float64(5e+22),
"b": float64(5E+22),
"c": float64(-5e+22),
"d": float64(-5e-22),
"e": float64(6.626e-34),
})
}
func TestSimpleDate(t *testing.T) { func TestSimpleDate(t *testing.T) {
tree, err := Load("a = 1979-05-27T07:32:00Z") tree, err := Load("a = 1979-05-27T07:32:00Z")
assertTree(t, tree, err, map[string]interface{}{ assertTree(t, tree, err, map[string]interface{}{
@@ -77,6 +113,20 @@ func TestSimpleDate(t *testing.T) {
}) })
} }
func TestDateOffset(t *testing.T) {
tree, err := Load("a = 1979-05-27T00:32:00-07:00")
assertTree(t, tree, err, map[string]interface{}{
"a": time.Date(1979, time.May, 27, 0, 32, 0, 0, time.FixedZone("", -7*60*60)),
})
}
func TestDateNano(t *testing.T) {
tree, err := Load("a = 1979-05-27T00:32:00.999999999-07:00")
assertTree(t, tree, err, map[string]interface{}{
"a": time.Date(1979, time.May, 27, 0, 32, 0, 999999999, time.FixedZone("", -7*60*60)),
})
}
func TestSimpleString(t *testing.T) { func TestSimpleString(t *testing.T) {
tree, err := Load("a = \"hello world\"") tree, err := Load("a = \"hello world\"")
assertTree(t, tree, err, map[string]interface{}{ assertTree(t, tree, err, map[string]interface{}{
@@ -84,6 +134,13 @@ func TestSimpleString(t *testing.T) {
}) })
} }
func TestSpaceKey(t *testing.T) {
tree, err := Load("\"a b\" = \"hello world\"")
assertTree(t, tree, err, map[string]interface{}{
"a b": "hello world",
})
}
func TestStringEscapables(t *testing.T) { func TestStringEscapables(t *testing.T) {
tree, err := Load("a = \"a \\n b\"") tree, err := Load("a = \"a \\n b\"")
assertTree(t, tree, err, map[string]interface{}{ assertTree(t, tree, err, map[string]interface{}{
@@ -127,6 +184,41 @@ func TestNestedKeys(t *testing.T) {
}) })
} }
func TestNestedQuotedUnicodeKeys(t *testing.T) {
tree, err := Load("[ j . \"ʞ\" . l ]\nd = 42")
assertTree(t, tree, err, map[string]interface{}{
"j": map[string]interface{}{
"ʞ": map[string]interface{}{
"l": map[string]interface{}{
"d": int64(42),
},
},
},
})
tree, err = Load("[ g . h . i ]\nd = 42")
assertTree(t, tree, err, map[string]interface{}{
"g": map[string]interface{}{
"h": map[string]interface{}{
"i": map[string]interface{}{
"d": int64(42),
},
},
},
})
tree, err = Load("[ d.e.f ]\nk = 42")
assertTree(t, tree, err, map[string]interface{}{
"d": map[string]interface{}{
"e": map[string]interface{}{
"f": map[string]interface{}{
"k": int64(42),
},
},
},
})
}
func TestArrayOne(t *testing.T) { func TestArrayOne(t *testing.T) {
tree, err := Load("a = [1]") tree, err := Load("a = [1]")
assertTree(t, tree, err, map[string]interface{}{ assertTree(t, tree, err, map[string]interface{}{
@@ -205,6 +297,16 @@ func TestUnterminatedArray(t *testing.T) {
if err.Error() != "(1, 8): unterminated array" { if err.Error() != "(1, 8): unterminated array" {
t.Error("Bad error message:", err.Error()) t.Error("Bad error message:", err.Error())
} }
_, err = Load("a = [1")
if err.Error() != "(1, 7): unterminated array" {
t.Error("Bad error message:", err.Error())
}
_, err = Load("a = [1 2")
if err.Error() != "(1, 8): missing comma" {
t.Error("Bad error message:", err.Error())
}
} }
func TestNewlinesInArrays(t *testing.T) { func TestNewlinesInArrays(t *testing.T) {
@@ -228,6 +330,80 @@ func TestArrayWithExtraCommaComment(t *testing.T) {
}) })
} }
func TestSimpleInlineGroup(t *testing.T) {
tree, err := Load("key = {a = 42}")
assertTree(t, tree, err, map[string]interface{}{
"key": map[string]interface{}{
"a": int64(42),
},
})
}
func TestDoubleInlineGroup(t *testing.T) {
tree, err := Load("key = {a = 42, b = \"foo\"}")
assertTree(t, tree, err, map[string]interface{}{
"key": map[string]interface{}{
"a": int64(42),
"b": "foo",
},
})
}
func TestExampleInlineGroup(t *testing.T) {
tree, err := Load(`name = { first = "Tom", last = "Preston-Werner" }
point = { x = 1, y = 2 }`)
assertTree(t, tree, err, map[string]interface{}{
"name": map[string]interface{}{
"first": "Tom",
"last": "Preston-Werner",
},
"point": map[string]interface{}{
"x": int64(1),
"y": int64(2),
},
})
}
func TestExampleInlineGroupInArray(t *testing.T) {
tree, err := Load(`points = [{ x = 1, y = 2 }]`)
assertTree(t, tree, err, map[string]interface{}{
"points": []map[string]interface{}{
map[string]interface{}{
"x": int64(1),
"y": int64(2),
},
},
})
}
func TestInlineTableUnterminated(t *testing.T) {
_, err := Load("foo = {")
if err.Error() != "(1, 8): unterminated inline table" {
t.Error("Bad error message:", err.Error())
}
}
func TestInlineTableCommaExpected(t *testing.T) {
_, err := Load("foo = {hello = 53 test = foo}")
if err.Error() != "(1, 19): comma expected between fields in inline table" {
t.Error("Bad error message:", err.Error())
}
}
func TestInlineTableCommaStart(t *testing.T) {
_, err := Load("foo = {, hello = 53}")
if err.Error() != "(1, 8): inline table cannot start with a comma" {
t.Error("Bad error message:", err.Error())
}
}
func TestInlineTableDoubleComma(t *testing.T) {
_, err := Load("foo = {hello = 53,, foo = 17}")
if err.Error() != "(1, 19): need field between two commas in inline table" {
t.Error("Bad error message:", err.Error())
}
}
func TestDuplicateGroups(t *testing.T) { func TestDuplicateGroups(t *testing.T) {
_, err := Load("[foo]\na=2\n[foo]b=3") _, err := Load("[foo]\na=2\n[foo]b=3")
if err.Error() != "(3, 2): duplicated tables" { if err.Error() != "(3, 2): duplicated tables" {
@@ -436,3 +612,17 @@ func TestNestedTreePosition(t *testing.T) {
"foo.bar.b": Position{3, 1}, "foo.bar.b": Position{3, 1},
}) })
} }
func TestInvalidGroupArray(t *testing.T) {
_, err := Load("[key#group]\nanswer = 42")
if err == nil {
t.Error("Should error")
}
}
func TestDoubleEqual(t *testing.T) {
_, err := Load("foo= = 2")
if err.Error() != "(1, 6): cannot have multiple equals for the same key" {
t.Error("Bad error message:", err.Error())
}
}
+3 -2
View File
@@ -9,9 +9,10 @@ package toml
import ( import (
"fmt" "fmt"
"math"
) )
const MaxInt = int(^uint(0) >> 1)
type queryParser struct { type queryParser struct {
flow chan token flow chan token
tokensBuffer []token tokensBuffer []token
@@ -203,7 +204,7 @@ loop: // labeled loop for easy breaking
func (p *queryParser) parseSliceExpr() queryParserStateFn { func (p *queryParser) parseSliceExpr() queryParserStateFn {
// init slice to grab all elements // init slice to grab all elements
start, end, step := 0, math.MaxInt64, 1 start, end, step := 0, MaxInt, 1
// parse optional start // parse optional start
tok := p.getToken() tok := p.getToken()
+58 -11
View File
@@ -5,15 +5,26 @@ set -e
# set the path to the present working directory # set the path to the present working directory
export GOPATH=`pwd` export GOPATH=`pwd`
# Vendorize the BurntSushi test suite function git_clone() {
# NOTE: this gets a specific release to avoid versioning issues path=$1
if [ ! -d 'src/github.com/BurntSushi/toml-test' ]; then branch=$2
mkdir -p src/github.com/BurntSushi version=$3
git clone https://github.com/BurntSushi/toml-test.git src/github.com/BurntSushi/toml-test if [ ! -d "src/$path" ]; then
fi mkdir -p src/$path
pushd src/github.com/BurntSushi/toml-test git clone https://$path.git src/$path
git reset --hard '0.2.0' # use the released version, NOT tip fi
popd pushd src/$path
git checkout "$branch"
git reset --hard "$version"
popd
}
# get code for BurntSushi TOML validation
# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
git_clone github.com/BurntSushi/toml master HEAD
git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD
# build the BurntSushi test application
go build -o toml-test github.com/BurntSushi/toml-test go build -o toml-test github.com/BurntSushi/toml-test
# vendorize the current lib for testing # vendorize the current lib for testing
@@ -23,6 +34,42 @@ cp *.go *.toml src/github.com/pelletier/go-toml
cp cmd/*.go src/github.com/pelletier/go-toml/cmd cp cmd/*.go src/github.com/pelletier/go-toml/cmd
go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
# Run basic unit tests and then the BurntSushi test suite # Run basic unit tests
go test -v github.com/pelletier/go-toml go test -v github.com/pelletier/go-toml
./toml-test ./test_program_bin | tee test_out
# run the entire BurntSushi test suite
if [[ $# -eq 0 ]] ; then
echo "Running all BurntSushi tests"
./toml-test ./test_program_bin | tee test_out
else
# run a specific test
test=$1
test_path='src/github.com/BurntSushi/toml-test/tests'
valid_test="$test_path/valid/$test"
invalid_test="$test_path/invalid/$test"
if [ -e "$valid_test.toml" ]; then
echo "Valid Test TOML for $test:"
echo "===="
cat "$valid_test.toml"
echo "Valid Test JSON for $test:"
echo "===="
cat "$valid_test.json"
echo "Go-TOML Output for $test:"
echo "===="
cat "$valid_test.toml" | ./test_program_bin
fi
if [ -e "$invalid_test.toml" ]; then
echo "Invalid Test TOML for $test:"
echo "===="
cat "$invalid_test.toml"
echo "Go-TOML Output for $test:"
echo "===="
echo "go-toml Output:"
cat "$invalid_test.toml" | ./test_program_bin
fi
fi
+14 -4
View File
@@ -26,6 +26,8 @@ const (
tokenEqual tokenEqual
tokenLeftBracket tokenLeftBracket
tokenRightBracket tokenRightBracket
tokenLeftCurlyBrace
tokenRightCurlyBrace
tokenLeftParen tokenLeftParen
tokenRightParen tokenRightParen
tokenDoubleLeftBracket tokenDoubleLeftBracket
@@ -44,6 +46,7 @@ const (
) )
var tokenTypeNames = []string{ var tokenTypeNames = []string{
"Error",
"EOF", "EOF",
"Comment", "Comment",
"Key", "Key",
@@ -54,7 +57,9 @@ var tokenTypeNames = []string{
"Float", "Float",
"=", "=",
"[", "[",
"[", "]",
"{",
"}",
"(", "(",
")", ")",
"]]", "]]",
@@ -117,9 +122,14 @@ func isAlphanumeric(r rune) bool {
} }
func isKeyChar(r rune) bool { func isKeyChar(r rune) bool {
// "Keys start with the first non-whitespace character and end with the last // Keys start with the first character that isn't whitespace or [ and end
// non-whitespace character before the equals sign." // with the last non-whitespace character before the equals sign. Keys
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '=') // cannot contain a # character."
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
} }
func isDigit(r rune) bool { func isDigit(r rune) bool {
+37 -6
View File
@@ -28,6 +28,12 @@ func newTomlTree() *TomlTree {
} }
} }
func TreeFromMap(m map[string]interface{}) *TomlTree {
return &TomlTree{
values: m,
}
}
// Has returns a boolean indicating if the given key exists. // Has returns a boolean indicating if the given key exists.
func (t *TomlTree) Has(key string) bool { func (t *TomlTree) Has(key string) bool {
if key == "" { if key == "" {
@@ -59,7 +65,11 @@ func (t *TomlTree) Get(key string) interface{} {
if key == "" { if key == "" {
return t return t
} }
return t.GetPath(strings.Split(key, ".")) comps, err := parseKey(key)
if err != nil {
return nil
}
return t.GetPath(comps)
} }
// GetPath returns the element in the tree indicated by 'keys'. // GetPath returns the element in the tree indicated by 'keys'.
@@ -171,7 +181,7 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
nextTree, exists := subtree.values[intermediateKey] nextTree, exists := subtree.values[intermediateKey]
if !exists { if !exists {
nextTree = newTomlTree() nextTree = newTomlTree()
subtree.values[intermediateKey] = &nextTree // add new element here subtree.values[intermediateKey] = nextTree // add new element here
} }
switch node := nextTree.(type) { switch node := nextTree.(type) {
case *TomlTree: case *TomlTree:
@@ -185,7 +195,21 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
subtree = node[len(node)-1] subtree = node[len(node)-1]
} }
} }
subtree.values[keys[len(keys)-1]] = value
var toInsert interface{}
switch value.(type) {
case *TomlTree:
toInsert = value
case []*TomlTree:
toInsert = value
case *tomlValue:
toInsert = value
default:
toInsert = &tomlValue{value: value}
}
subtree.values[keys[len(keys)-1]] = toInsert
} }
// createSubTree takes a tree and a key and create the necessary intermediate // createSubTree takes a tree and a key and create the necessary intermediate
@@ -215,8 +239,8 @@ func (t *TomlTree) createSubTree(keys []string, pos Position) error {
case *TomlTree: case *TomlTree:
subtree = node subtree = node
default: default:
return fmt.Errorf("unknown type for path %s (%s)", return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
strings.Join(keys, "."), intermediateKey) strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
} }
} }
return nil return nil
@@ -306,10 +330,17 @@ func (t *TomlTree) toToml(indent, keyspace string) string {
result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey) result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
} }
result += node.toToml(indent+" ", combinedKey) result += node.toToml(indent+" ", combinedKey)
case map[string]interface{}:
sub := TreeFromMap(node)
if len(sub.Keys()) > 0 {
result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
}
result += sub.toToml(indent+" ", combinedKey)
case *tomlValue: case *tomlValue:
result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0)) result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0))
default: default:
panic(fmt.Sprintf("unsupported node type: %v", node)) result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0))
} }
} }
return result return result