Compare commits

..

8 Commits

Author SHA1 Message Date
Claude fa98989475 fix: resolve lint issues and improve test coverage for TOML v1.1.0
- Fix dupl lint: add nolint:dupl to parseInlineTable and parseValArray
  which have intentionally similar loop structures
- Fix gci lint: correct alignment in multiline basic string test entries
- Add unmarshaler tests for new TOML v1.1.0 features exercising public
  APIs: multiline inline tables with comments, trailing commas, leading
  commas, error cases (comma at start, missing separator, double comma,
  incomplete table), escape sequences, and type mismatch errors
- Add parser test for inline table comment handling with KeepComments
- Coverage increases from 97.37% to 97.44% vs v2 base

https://claude.ai/code/session_01RdiWykFQdmwkQ2nbLwJwwP
2026-03-29 17:37:29 +00:00
João Fernandes 189bf9820b test: parsing inline table 2026-03-03 14:03:57 +00:00
João Fernandes 8455b10edd test: regen toml-test tests targeting TOML v1.1.0 2026-02-11 11:49:40 +00:00
João Fernandes 6de639d0ae docs: update README 2026-02-11 11:15:44 +00:00
João Fernandes 517ceb4eb8 feat: allow newlines and trailing commas in inline tables
TOML v1.1.0 relaxes inline table syntax to allow newlines, comments,
and trailing commas, matching the existing behavior of arrays.
2026-02-11 11:15:33 +00:00
João Fernandes dd7970eb93 feat: make seconds optional in datetime and time values
TOML v1.1.0 allows times to be specified as HH:MM without the seconds
component (previously HH:MM:SS was required). This applies to local
times, local datetimes, and offset datetimes.
2026-02-11 11:15:16 +00:00
João Fernandes 3405e8a1d9 test: \e escape character 2026-02-11 11:14:59 +00:00
João Fernandes 5794be6251 feat: add \xHH escape sequence support in basic strings
TOML v1.1.0 introduces the \xHH escape notation for basic strings,
allowing two-digit hex escapes for Unicode code points U+0000 to
U+00FF.

We keep emitting \u00XX for backwards compatibility.
2026-02-11 11:14:23 +00:00
28 changed files with 2260 additions and 1519 deletions
+1 -1
View File
@@ -19,7 +19,7 @@ jobs:
dry-run: false dry-run: false
language: go language: go
- name: Upload Crash - name: Upload Crash
uses: actions/upload-artifact@v7 uses: actions/upload-artifact@v6
if: failure() && steps.build.outcome == 'success' if: failure() && steps.build.outcome == 'success'
with: with:
name: artifacts name: artifacts
+1 -1
View File
@@ -15,6 +15,6 @@ jobs:
- name: Setup go - name: Setup go
uses: actions/setup-go@v6 uses: actions/setup-go@v6
with: with:
go-version: "1.26" go-version: "1.25"
- name: Run tests with coverage - name: Run tests with coverage
run: ./ci.sh coverage -d "${GITHUB_BASE_REF-HEAD}" run: ./ci.sh coverage -d "${GITHUB_BASE_REF-HEAD}"
+2 -2
View File
@@ -20,7 +20,7 @@ jobs:
fetch-depth: 0 fetch-depth: 0
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v4 uses: docker/setup-buildx-action@v3
- name: Run Go versions compatibility test - name: Run Go versions compatibility test
run: | run: |
@@ -28,7 +28,7 @@ jobs:
./test-go-versions.sh --output ./test-results $VERSIONS ./test-go-versions.sh --output ./test-results $VERSIONS
- name: Upload test results - name: Upload test results
uses: actions/upload-artifact@v7 uses: actions/upload-artifact@v6
with: with:
name: go-versions-test-results name: go-versions-test-results
path: | path: |
+1 -1
View File
@@ -15,7 +15,7 @@ jobs:
- name: Setup go - name: Setup go
uses: actions/setup-go@v6 uses: actions/setup-go@v6
with: with:
go-version: "1.26" go-version: "1.24"
- name: Run golangci-lint - name: Run golangci-lint
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v9
with: with:
+3 -3
View File
@@ -22,15 +22,15 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v6 uses: actions/setup-go@v6
with: with:
go-version: "1.26" go-version: "1.25"
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v4 uses: docker/login-action@v3
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Run GoReleaser - name: Run GoReleaser
uses: goreleaser/goreleaser-action@v7 uses: goreleaser/goreleaser-action@v6
with: with:
distribution: goreleaser distribution: goreleaser
version: '~> v2' version: '~> v2'
+1 -2
View File
@@ -10,10 +10,9 @@ on:
jobs: jobs:
build: build:
strategy: strategy:
fail-fast: false
matrix: matrix:
os: [ 'ubuntu-latest', 'windows-latest', 'macos-latest', 'macos-14' ] os: [ 'ubuntu-latest', 'windows-latest', 'macos-latest', 'macos-14' ]
go: [ '1.25', '1.26' ] go: [ '1.24', '1.25' ]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
name: ${{ matrix.go }}/${{ matrix.os }} name: ${{ matrix.go }}/${{ matrix.os }}
steps: steps:
+3
View File
@@ -22,6 +22,7 @@ builds:
- linux_riscv64 - linux_riscv64
- windows_amd64 - windows_amd64
- windows_arm64 - windows_arm64
- windows_arm
- darwin_amd64 - darwin_amd64
- darwin_arm64 - darwin_arm64
- id: tomljson - id: tomljson
@@ -41,6 +42,7 @@ builds:
- linux_riscv64 - linux_riscv64
- windows_amd64 - windows_amd64
- windows_arm64 - windows_arm64
- windows_arm
- darwin_amd64 - darwin_amd64
- darwin_arm64 - darwin_arm64
- id: jsontoml - id: jsontoml
@@ -60,6 +62,7 @@ builds:
- linux_arm - linux_arm
- windows_amd64 - windows_amd64
- windows_arm64 - windows_arm64
- windows_arm
- darwin_amd64 - darwin_amd64
- darwin_arm64 - darwin_arm64
universal_binaries: universal_binaries:
+19 -19
View File
@@ -2,7 +2,7 @@
Go library for the [TOML](https://toml.io/en/) format. Go library for the [TOML](https://toml.io/en/) format.
This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). This library supports [TOML v1.1.0](https://toml.io/en/v1.1.0).
[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) [🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
@@ -67,7 +67,7 @@ this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
making them convenient yet unambiguous structures for their respective TOML making them convenient yet unambiguous structures for their respective TOML
representation. representation.
[ldt]: https://toml.io/en/v1.0.0#local-date-time [ldt]: https://toml.io/en/v1.1.0#local-date-time
[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate [tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime [tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime [tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
@@ -239,12 +239,12 @@ Execution time speedup compared to other Go TOML libraries:
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
</thead> </thead>
<tbody> <tbody>
<tr><td>Marshal/HugoFrontMatter-2</td><td>2.1x</td><td>2.0x</td></tr> <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
<tr><td>Marshal/ReferenceFile/map-2</td><td>2.0x</td><td>2.0x</td></tr> <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.3x</td><td>2.5x</td></tr> <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
<tr><td>Unmarshal/HugoFrontMatter-2</td><td>3.3x</td><td>2.8x</td></tr> <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.9x</td><td>3.0x</td></tr> <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.8x</td><td>5.0x</td></tr> <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
</tbody> </tbody>
</table> </table>
<details><summary>See more</summary> <details><summary>See more</summary>
@@ -257,17 +257,17 @@ provided for completeness.</p>
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
</thead> </thead>
<tbody> <tbody>
<tr><td>Marshal/SimpleDocument/map-2</td><td>2.0x</td><td>2.9x</td></tr> <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.5x</td><td>3.6x</td></tr> <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
<tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.2x</td><td>3.4x</td></tr> <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.9x</td><td>4.4x</td></tr> <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
<tr><td>UnmarshalDataset/example-2</td><td>3.2x</td><td>2.9x</td></tr> <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
<tr><td>UnmarshalDataset/code-2</td><td>2.4x</td><td>2.8x</td></tr> <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
<tr><td>UnmarshalDataset/twitter-2</td><td>2.7x</td><td>2.5x</td></tr> <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.3x</td><td>2.3x</td></tr> <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
<tr><td>UnmarshalDataset/canada-2</td><td>1.9x</td><td>1.5x</td></tr> <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
<tr><td>UnmarshalDataset/config-2</td><td>5.4x</td><td>3.0x</td></tr> <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
<tr><td>geomean</td><td>2.9x</td><td>2.8x</td></tr> <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
</tbody> </tbody>
</table> </table>
<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p> <p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
+1 -6
View File
@@ -147,7 +147,7 @@ bench() {
pushd "$dir" pushd "$dir"
if [ "${replace}" != "" ]; then if [ "${replace}" != "" ]; then
find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2\"|${replace}\"|g" {} \; find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
go get "${replace}" go get "${replace}"
fi fi
@@ -195,11 +195,6 @@ for line in reversed(lines[2:]):
"%.1fx" % (float(line[3])/v2), # v1 "%.1fx" % (float(line[3])/v2), # v1
"%.1fx" % (float(line[7])/v2), # bs "%.1fx" % (float(line[7])/v2), # bs
]) ])
if not results:
print("No benchmark results to display.", file=sys.stderr)
sys.exit(1)
# move geomean to the end # move geomean to the end
results.append(results[0]) results.append(results[0])
del results[0] del results[0]
+103 -83
View File
@@ -9,60 +9,64 @@ import (
"github.com/pelletier/go-toml/v2/unstable" "github.com/pelletier/go-toml/v2/unstable"
) )
func parseInteger(b []byte, base int) (int64, error) { func parseInteger(b []byte) (int64, error) {
if len(b) > 2 && b[0] == '0' { if len(b) > 2 && b[0] == '0' {
switch b[1] { switch b[1] {
case 'x': case 'x':
return parseIntHex(b, base) return parseIntHex(b)
case 'b': case 'b':
return parseIntBin(b, base) return parseIntBin(b)
case 'o': case 'o':
return parseIntOct(b, base) return parseIntOct(b)
default: default:
panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
} }
} }
return parseIntDec(b, base) return parseIntDec(b)
} }
func parseLocalDate(b []byte, base int) (LocalDate, error) { func parseLocalDate(b []byte) (LocalDate, error) {
// full-date = date-fullyear "-" date-month "-" date-mday
// date-fullyear = 4DIGIT
// date-month = 2DIGIT ; 01-12
// date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
var date LocalDate var date LocalDate
if len(b) != 10 || b[4] != '-' || b[7] != '-' { if len(b) != 10 || b[4] != '-' || b[7] != '-' {
return date, unstable.NewParserError(b, base, "dates are expected to have the format YYYY-MM-DD") return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
} }
var err error var err error
date.Year, err = parseDecimalDigits(b[0:4], base) date.Year, err = parseDecimalDigits(b[0:4])
if err != nil { if err != nil {
return LocalDate{}, err return LocalDate{}, err
} }
date.Month, err = parseDecimalDigits(b[5:7], base+5) date.Month, err = parseDecimalDigits(b[5:7])
if err != nil { if err != nil {
return LocalDate{}, err return LocalDate{}, err
} }
date.Day, err = parseDecimalDigits(b[8:10], base+8) date.Day, err = parseDecimalDigits(b[8:10])
if err != nil { if err != nil {
return LocalDate{}, err return LocalDate{}, err
} }
if !isValidDate(date.Year, date.Month, date.Day) { if !isValidDate(date.Year, date.Month, date.Day) {
return LocalDate{}, unstable.NewParserError(b, base, "impossible date") return LocalDate{}, unstable.NewParserError(b, "impossible date")
} }
return date, nil return date, nil
} }
func parseDecimalDigits(b []byte, base int) (int, error) { func parseDecimalDigits(b []byte) (int, error) {
v := 0 v := 0
for i, c := range b { for i, c := range b {
if c < '0' || c > '9' { if c < '0' || c > '9' {
return 0, unstable.NewParserError(b[i:i+1], base+i, "expected digit (0-9)") return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
} }
v *= 10 v *= 10
v += int(c - '0') v += int(c - '0')
@@ -71,18 +75,21 @@ func parseDecimalDigits(b []byte, base int) (int, error) {
return v, nil return v, nil
} }
func parseDateTime(b []byte, base int) (time.Time, error) { func parseDateTime(b []byte) (time.Time, error) {
origLen := len(b) // offset-date-time = full-date time-delim full-time
dt, b, err := parseLocalDateTime(b, base) // full-time = partial-time time-offset
// time-offset = "Z" / time-numoffset
// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
dt, b, err := parseLocalDateTime(b)
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
tzBase := base + origLen - len(b)
var zone *time.Location var zone *time.Location
if len(b) == 0 { if len(b) == 0 {
// parser should have checked that when assigning the date time node
panic("date time should have a timezone") panic("date time should have a timezone")
} }
@@ -92,7 +99,7 @@ func parseDateTime(b []byte, base int) (time.Time, error) {
} else { } else {
const dateTimeByteLen = 6 const dateTimeByteLen = 6
if len(b) != dateTimeByteLen { if len(b) != dateTimeByteLen {
return time.Time{}, unstable.NewParserError(b, tzBase, "invalid date-time timezone") return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
} }
var direction int var direction int
switch b[0] { switch b[0] {
@@ -101,27 +108,27 @@ func parseDateTime(b []byte, base int) (time.Time, error) {
case '+': case '+':
direction = +1 direction = +1
default: default:
return time.Time{}, unstable.NewParserError(b[:1], tzBase, "invalid timezone offset character") return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
} }
if b[3] != ':' { if b[3] != ':' {
return time.Time{}, unstable.NewParserError(b[3:4], tzBase+3, "expected a : separator") return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
} }
hours, err := parseDecimalDigits(b[1:3], tzBase+1) hours, err := parseDecimalDigits(b[1:3])
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
if hours > 23 { if hours > 23 {
return time.Time{}, unstable.NewParserError(b[:1], tzBase, "invalid timezone offset hours") return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
} }
minutes, err := parseDecimalDigits(b[4:6], tzBase+4) minutes, err := parseDecimalDigits(b[4:6])
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
if minutes > 59 { if minutes > 59 {
return time.Time{}, unstable.NewParserError(b[:1], tzBase, "invalid timezone offset minutes") return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
} }
seconds := direction * (hours*3600 + minutes*60) seconds := direction * (hours*3600 + minutes*60)
@@ -134,7 +141,7 @@ func parseDateTime(b []byte, base int) (time.Time, error) {
} }
if len(b) > 0 { if len(b) > 0 {
return time.Time{}, unstable.NewParserError(b, tzBase, "extra bytes at the end of the timezone") return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
} }
t := time.Date( t := time.Date(
@@ -150,15 +157,15 @@ func parseDateTime(b []byte, base int) (time.Time, error) {
return t, nil return t, nil
} }
func parseLocalDateTime(b []byte, base int) (LocalDateTime, []byte, error) { func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
var dt LocalDateTime var dt LocalDateTime
const localDateTimeByteMinLen = 11 const localDateTimeByteMinLen = 11
if len(b) < localDateTimeByteMinLen { if len(b) < localDateTimeByteMinLen {
return dt, nil, unstable.NewParserError(b, base, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM[:SS[.NNNNNNNNN]]")
} }
date, err := parseLocalDate(b[:10], base) date, err := parseLocalDate(b[:10])
if err != nil { if err != nil {
return dt, nil, err return dt, nil, err
} }
@@ -166,10 +173,10 @@ func parseLocalDateTime(b []byte, base int) (LocalDateTime, []byte, error) {
sep := b[10] sep := b[10]
if sep != 'T' && sep != ' ' && sep != 't' { if sep != 'T' && sep != ' ' && sep != 't' {
return dt, nil, unstable.NewParserError(b[10:11], base+10, "datetime separator is expected to be T or a space") return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
} }
t, rest, err := parseLocalTime(b[11:], base+11) t, rest, err := parseLocalTime(b[11:])
if err != nil { if err != nil {
return dt, nil, err return dt, nil, err
} }
@@ -181,53 +188,58 @@ func parseLocalDateTime(b []byte, base int) (LocalDateTime, []byte, error) {
// parseLocalTime is a bit different because it also returns the remaining // parseLocalTime is a bit different because it also returns the remaining
// []byte that it didn't need. This is to allow parseDateTime to parse those // []byte that it didn't need. This is to allow parseDateTime to parse those
// remaining bytes as a timezone. // remaining bytes as a timezone.
func parseLocalTime(b []byte, base int) (LocalTime, []byte, error) { func parseLocalTime(b []byte) (LocalTime, []byte, error) {
var ( var (
nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
t LocalTime t LocalTime
) )
const localTimeByteLen = 8 // check if b matches to have expected format HH:MM[:SS[.NNNNNN]]
if len(b) < localTimeByteLen { const localTimeByteMinLen = 5
return t, nil, unstable.NewParserError(b, base, "times are expected to have the format HH:MM:SS[.NNNNNN]") if len(b) < localTimeByteMinLen {
return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM[:SS[.NNNNNN]]")
} }
var err error var err error
t.Hour, err = parseDecimalDigits(b[0:2], base) t.Hour, err = parseDecimalDigits(b[0:2])
if err != nil { if err != nil {
return t, nil, err return t, nil, err
} }
if t.Hour > 23 { if t.Hour > 23 {
return t, nil, unstable.NewParserError(b[0:2], base, "hour cannot be greater 23") return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
} }
if b[2] != ':' { if b[2] != ':' {
return t, nil, unstable.NewParserError(b[2:3], base+2, "expecting colon between hours and minutes") return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
} }
t.Minute, err = parseDecimalDigits(b[3:5], base+3) t.Minute, err = parseDecimalDigits(b[3:5])
if err != nil { if err != nil {
return t, nil, err return t, nil, err
} }
if t.Minute > 59 { if t.Minute > 59 {
return t, nil, unstable.NewParserError(b[3:5], base+3, "minutes cannot be greater 59") return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
}
if b[5] != ':' {
return t, nil, unstable.NewParserError(b[5:6], base+5, "expecting colon between minutes and seconds")
} }
t.Second, err = parseDecimalDigits(b[6:8], base+6) b = b[5:]
if len(b) >= 1 && b[0] == ':' {
if len(b) < 3 {
return t, nil, unstable.NewParserError(b, "incomplete seconds")
}
t.Second, err = parseDecimalDigits(b[1:3])
if err != nil { if err != nil {
return t, nil, err return t, nil, err
} }
if t.Second > 59 { if t.Second > 59 {
return t, nil, unstable.NewParserError(b[6:8], base+6, "seconds cannot be greater than 59") return t, nil, unstable.NewParserError(b[1:3], "seconds cannot be greater than 59")
} }
b = b[8:] b = b[3:]
base += 8 }
if len(b) >= 1 && b[0] == '.' { if len(b) >= 1 && b[0] == '.' {
frac := 0 frac := 0
@@ -237,7 +249,7 @@ func parseLocalTime(b []byte, base int) (LocalTime, []byte, error) {
for i, c := range b[1:] { for i, c := range b[1:] {
if !isDigit(c) { if !isDigit(c) {
if i == 0 { if i == 0 {
return t, nil, unstable.NewParserError(b[0:1], base, "need at least one digit after fraction point") return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
} }
break break
} }
@@ -245,6 +257,13 @@ func parseLocalTime(b []byte, base int) (LocalTime, []byte, error) {
const maxFracPrecision = 9 const maxFracPrecision = 9
if i >= maxFracPrecision { if i >= maxFracPrecision {
// go-toml allows decoding fractional seconds
// beyond the supported precision of 9
// digits. It truncates the fractional component
// to the supported precision and ignores the
// remaining digits.
//
// https://github.com/pelletier/go-toml/discussions/707
continue continue
} }
@@ -254,7 +273,7 @@ func parseLocalTime(b []byte, base int) (LocalTime, []byte, error) {
} }
if precision == 0 { if precision == 0 {
return t, nil, unstable.NewParserError(b[:1], base, "nanoseconds need at least one digit") return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
} }
t.Nanosecond = frac * nspow[precision] t.Nanosecond = frac * nspow[precision]
@@ -265,35 +284,35 @@ func parseLocalTime(b []byte, base int) (LocalTime, []byte, error) {
return t, b, nil return t, b, nil
} }
func parseFloat(b []byte, base int) (float64, error) { func parseFloat(b []byte) (float64, error) {
if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
return math.NaN(), nil return math.NaN(), nil
} }
cleaned, err := checkAndRemoveUnderscoresFloats(b, base) cleaned, err := checkAndRemoveUnderscoresFloats(b)
if err != nil { if err != nil {
return 0, err return 0, err
} }
if cleaned[0] == '.' { if cleaned[0] == '.' {
return 0, unstable.NewParserError(b, base, "float cannot start with a dot") return 0, unstable.NewParserError(b, "float cannot start with a dot")
} }
if cleaned[len(cleaned)-1] == '.' { if cleaned[len(cleaned)-1] == '.' {
return 0, unstable.NewParserError(b, base, "float cannot end with a dot") return 0, unstable.NewParserError(b, "float cannot end with a dot")
} }
dotAlreadySeen := false dotAlreadySeen := false
for i, c := range cleaned { for i, c := range cleaned {
if c == '.' { if c == '.' {
if dotAlreadySeen { if dotAlreadySeen {
return 0, unstable.NewParserError(b[i:i+1], base+i, "float can have at most one decimal point") return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
} }
if !isDigit(cleaned[i-1]) { if !isDigit(cleaned[i-1]) {
return 0, unstable.NewParserError(b[i-1:i+1], base+i-1, "float decimal point must be preceded by a digit") return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
} }
if !isDigit(cleaned[i+1]) { if !isDigit(cleaned[i+1]) {
return 0, unstable.NewParserError(b[i:i+2], base+i, "float decimal point must be followed by a digit") return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
} }
dotAlreadySeen = true dotAlreadySeen = true
} }
@@ -304,54 +323,54 @@ func parseFloat(b []byte, base int) (float64, error) {
start = 1 start = 1
} }
if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
return 0, unstable.NewParserError(b, base, "float integer part cannot have leading zeroes") return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
} }
f, err := strconv.ParseFloat(string(cleaned), 64) f, err := strconv.ParseFloat(string(cleaned), 64)
if err != nil { if err != nil {
return 0, unstable.NewParserError(b, base, "unable to parse float: %w", err) return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
} }
return f, nil return f, nil
} }
func parseIntHex(b []byte, base int) (int64, error) { func parseIntHex(b []byte) (int64, error) {
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:], base+2) cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i, err := strconv.ParseInt(string(cleaned), 16, 64) i, err := strconv.ParseInt(string(cleaned), 16, 64)
if err != nil { if err != nil {
return 0, unstable.NewParserError(b, base, "couldn't parse hexadecimal number: %w", err) return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
} }
return i, nil return i, nil
} }
func parseIntOct(b []byte, base int) (int64, error) { func parseIntOct(b []byte) (int64, error) {
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:], base+2) cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i, err := strconv.ParseInt(string(cleaned), 8, 64) i, err := strconv.ParseInt(string(cleaned), 8, 64)
if err != nil { if err != nil {
return 0, unstable.NewParserError(b, base, "couldn't parse octal number: %w", err) return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
} }
return i, nil return i, nil
} }
func parseIntBin(b []byte, base int) (int64, error) { func parseIntBin(b []byte) (int64, error) {
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:], base+2) cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i, err := strconv.ParseInt(string(cleaned), 2, 64) i, err := strconv.ParseInt(string(cleaned), 2, 64)
if err != nil { if err != nil {
return 0, unstable.NewParserError(b, base, "couldn't parse binary number: %w", err) return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
} }
return i, nil return i, nil
@@ -361,8 +380,8 @@ func isSign(b byte) bool {
return b == '+' || b == '-' return b == '+' || b == '-'
} }
func parseIntDec(b []byte, base int) (int64, error) { func parseIntDec(b []byte) (int64, error) {
cleaned, err := checkAndRemoveUnderscoresIntegers(b, base) cleaned, err := checkAndRemoveUnderscoresIntegers(b)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -374,18 +393,18 @@ func parseIntDec(b []byte, base int) (int64, error) {
} }
if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
return 0, unstable.NewParserError(b, base, "leading zero not allowed on decimal number") return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
} }
i, err := strconv.ParseInt(string(cleaned), 10, 64) i, err := strconv.ParseInt(string(cleaned), 10, 64)
if err != nil { if err != nil {
return 0, unstable.NewParserError(b, base, "couldn't parse decimal number: %w", err) return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
} }
return i, nil return i, nil
} }
func checkAndRemoveUnderscoresIntegers(b []byte, base int) ([]byte, error) { func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
start := 0 start := 0
if b[start] == '+' || b[start] == '-' { if b[start] == '+' || b[start] == '-' {
start++ start++
@@ -396,11 +415,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte, base int) ([]byte, error) {
} }
if b[start] == '_' { if b[start] == '_' {
return nil, unstable.NewParserError(b[start:start+1], base+start, "number cannot start with underscore") return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
} }
if b[len(b)-1] == '_' { if b[len(b)-1] == '_' {
return nil, unstable.NewParserError(b[len(b)-1:], base+len(b)-1, "number cannot end with underscore") return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
} }
// fast path // fast path
@@ -422,7 +441,7 @@ func checkAndRemoveUnderscoresIntegers(b []byte, base int) ([]byte, error) {
c := b[i] c := b[i]
if c == '_' { if c == '_' {
if !before { if !before {
return nil, unstable.NewParserError(b[i-1:i+1], base+i-1, "number must have at least one digit between underscores") return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
} }
before = false before = false
} else { } else {
@@ -434,13 +453,13 @@ func checkAndRemoveUnderscoresIntegers(b []byte, base int) ([]byte, error) {
return cleaned, nil return cleaned, nil
} }
func checkAndRemoveUnderscoresFloats(b []byte, base int) ([]byte, error) { func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
if b[0] == '_' { if b[0] == '_' {
return nil, unstable.NewParserError(b[0:1], base, "number cannot start with underscore") return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
} }
if b[len(b)-1] == '_' { if b[len(b)-1] == '_' {
return nil, unstable.NewParserError(b[len(b)-1:], base+len(b)-1, "number cannot end with underscore") return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
} }
// fast path // fast path
@@ -463,26 +482,27 @@ func checkAndRemoveUnderscoresFloats(b []byte, base int) ([]byte, error) {
switch c { switch c {
case '_': case '_':
if !before { if !before {
return nil, unstable.NewParserError(b[i-1:i+1], base+i-1, "number must have at least one digit between underscores") return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
} }
if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
return nil, unstable.NewParserError(b[i+1:i+2], base+i+1, "cannot have underscore before exponent") return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
} }
before = false before = false
case '+', '-': case '+', '-':
// signed exponents
cleaned = append(cleaned, c) cleaned = append(cleaned, c)
before = false before = false
case 'e', 'E': case 'e', 'E':
if i < len(b)-1 && b[i+1] == '_' { if i < len(b)-1 && b[i+1] == '_' {
return nil, unstable.NewParserError(b[i+1:i+2], base+i+1, "cannot have underscore after exponent") return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
} }
cleaned = append(cleaned, c) cleaned = append(cleaned, c)
case '.': case '.':
if i < len(b)-1 && b[i+1] == '_' { if i < len(b)-1 && b[i+1] == '_' {
return nil, unstable.NewParserError(b[i+1:i+2], base+i+1, "cannot have underscore after decimal point") return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
} }
if i > 0 && b[i-1] == '_' { if i > 0 && b[i-1] == '_' {
return nil, unstable.NewParserError(b[i-1:i], base+i-1, "cannot have underscore before decimal point") return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
} }
cleaned = append(cleaned, c) cleaned = append(cleaned, c)
default: default:
+21 -1
View File
@@ -2,6 +2,7 @@ package toml
import ( import (
"fmt" "fmt"
"reflect"
"strconv" "strconv"
"strings" "strings"
@@ -99,7 +100,7 @@ func (e *DecodeError) Key() Key {
// //
//nolint:funlen //nolint:funlen
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
offset := de.Offset offset := subsliceOffset(document, de.Highlight)
errMessage := de.Error() errMessage := de.Error()
errLine, errColumn := positionAtEnd(document[:offset]) errLine, errColumn := positionAtEnd(document[:offset])
@@ -261,3 +262,22 @@ func positionAtEnd(b []byte) (row int, column int) {
return row, column return row, column
} }
// subsliceOffset returns the byte offset of subslice within data.
// subslice must share the same backing array as data.
func subsliceOffset(data []byte, subslice []byte) int {
if len(subslice) == 0 {
return 0
}
// Use reflect to get the data pointers of both slices.
// This is safe because we're only reading the pointer values for comparison.
dataPtr := reflect.ValueOf(data).Pointer()
subPtr := reflect.ValueOf(subslice).Pointer()
offset := int(subPtr - dataPtr)
if offset < 0 || offset > len(data) {
panic("subslice is not within data")
}
return offset
}
-101
View File
@@ -171,7 +171,6 @@ line 5`,
err := wrapDecodeError(doc, &unstable.ParserError{ err := wrapDecodeError(doc, &unstable.ParserError{
Highlight: hl, Highlight: hl,
Offset: start,
Message: e.msg, Message: e.msg,
}) })
@@ -260,12 +259,6 @@ func TestDecodeError_Position(t *testing.T) {
expectedRow: 3, expectedRow: 3,
minCol: 5, minCol: 5,
}, },
{
name: "missing equals on last line without trailing newline",
doc: "a = 1\nb = 2\nc",
expectedRow: 3,
minCol: 1,
},
} }
for _, e := range examples { for _, e := range examples {
@@ -287,100 +280,6 @@ func TestDecodeError_Position(t *testing.T) {
} }
} }
func TestDecodeError_PositionAfterComments(t *testing.T) {
examples := []struct {
name string
doc string
expectedRow int
expectedCol int
errContains string
}{
{
name: "invalid key after comment",
doc: "# comment\n= \"value\"",
expectedRow: 2,
expectedCol: 1,
errContains: "invalid character at start of key",
},
{
name: "invalid key after multiple comments",
doc: "# line 1\n# line 2\n= \"value\"",
expectedRow: 3,
expectedCol: 1,
errContains: "invalid character at start of key",
},
{
name: "invalid key after valid assignment and comment",
doc: "a = 1\n# comment\n= \"value\"",
expectedRow: 3,
expectedCol: 1,
errContains: "invalid character at start of key",
},
{
name: "invalid key on first line",
doc: "= \"value\"",
expectedRow: 1,
expectedCol: 1,
errContains: "invalid character at start of key",
},
{
name: "invalid key with leading whitespace",
doc: "# comment\n = \"value\"",
expectedRow: 2,
expectedCol: 3,
errContains: "invalid character at start of key",
},
}
for _, e := range examples {
t.Run(e.name, func(t *testing.T) {
var v map[string]interface{}
err := Unmarshal([]byte(e.doc), &v)
if err == nil {
t.Fatal("expected an error")
}
var derr *DecodeError
if !errors.As(err, &derr) {
t.Fatalf("expected DecodeError, got %T: %v", err, err)
}
row, col := derr.Position()
if row != e.expectedRow {
t.Errorf("row: got %d, want %d (error: %s)", row, e.expectedRow, derr.String())
}
if col != e.expectedCol {
t.Errorf("col: got %d, want %d (error: %s)", col, e.expectedCol, derr.String())
}
if !strings.Contains(derr.Error(), e.errContains) {
t.Errorf("error %q does not contain %q", derr.Error(), e.errContains)
}
})
}
}
func TestDecodeError_HumanStringAfterComments(t *testing.T) {
doc := "# comment\n= \"value\""
var v map[string]interface{}
err := Unmarshal([]byte(doc), &v)
if err == nil {
t.Fatal("expected an error")
}
var derr *DecodeError
if !errors.As(err, &derr) {
t.Fatalf("expected DecodeError, got %T: %v", err, err)
}
human := derr.String()
if !strings.Contains(human, "= \"value\"") {
t.Errorf("human-readable error should show the offending line, got:\n%s", human)
}
if !strings.Contains(human, "2|") {
t.Errorf("human-readable error should reference line 2, got:\n%s", human)
}
}
func TestStrictErrorUnwrap(t *testing.T) { func TestStrictErrorUnwrap(t *testing.T) {
fo := bytes.NewBufferString(` fo := bytes.NewBufferString(`
Missing = 1 Missing = 1
+16 -12
View File
@@ -24,57 +24,61 @@ import (
// 0x9 => tab, ok // 0x9 => tab, ok
// 0xA - 0x1F => invalid // 0xA - 0x1F => invalid
// 0x7F => invalid // 0x7F => invalid
func Utf8TomlValidAlreadyEscaped(p []byte) int { func Utf8TomlValidAlreadyEscaped(p []byte) []byte {
consumed := 0
// Fast path. Check for and skip 8 bytes of ASCII characters per iteration. // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
for len(p) >= 8 { for len(p) >= 8 {
// Combining two 32 bit loads allows the same code to be used
// for 32 and 64 bit platforms.
// The compiler can generate a 32bit load for first32 and second32
// on many platforms. See test/codegen/memcombine.go.
first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
if (first32|second32)&0x80808080 != 0 { if (first32|second32)&0x80808080 != 0 {
// Found a non ASCII byte (>= RuneSelf).
break break
} }
for i, b := range p[:8] { for i, b := range p[:8] {
if InvalidASCII(b) { if InvalidASCII(b) {
return consumed + i return p[i : i+1]
} }
} }
p = p[8:] p = p[8:]
consumed += 8
} }
n := len(p) n := len(p)
for i := 0; i < n; { for i := 0; i < n; {
pi := p[i] pi := p[i]
if pi < utf8.RuneSelf { if pi < utf8.RuneSelf {
if InvalidASCII(pi) { if InvalidASCII(pi) {
return consumed + i return p[i : i+1]
} }
i++ i++
continue continue
} }
x := first[pi] x := first[pi]
if x == xx { if x == xx {
return consumed + i // Illegal starter byte.
return p[i : i+1]
} }
size := int(x & 7) size := int(x & 7)
if i+size > n { if i+size > n {
return consumed + i // Short or invalid.
return p[i:n]
} }
accept := acceptRanges[x>>4] accept := acceptRanges[x>>4]
if c := p[i+1]; c < accept.lo || accept.hi < c { if c := p[i+1]; c < accept.lo || accept.hi < c {
return consumed + i return p[i : i+2]
} else if size == 2 { //revive:disable:empty-block } else if size == 2 { //revive:disable:empty-block
} else if c := p[i+2]; c < locb || hicb < c { } else if c := p[i+2]; c < locb || hicb < c {
return consumed + i return p[i : i+3]
} else if size == 3 { //revive:disable:empty-block } else if size == 3 { //revive:disable:empty-block
} else if c := p[i+3]; c < locb || hicb < c { } else if c := p[i+3]; c < locb || hicb < c {
return consumed + i return p[i : i+4]
} }
i += size i += size
} }
return -1 return nil
} }
// Utf8ValidNext returns the size of the next rune if valid, 0 otherwise. // Utf8ValidNext returns the size of the next rune if valid, 0 otherwise.
+5 -5
View File
@@ -32,7 +32,7 @@ func (d LocalDate) MarshalText() ([]byte, error) {
// UnmarshalText parses b using RFC 3339 to fill d. // UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalDate) UnmarshalText(b []byte) error { func (d *LocalDate) UnmarshalText(b []byte) error {
res, err := parseLocalDate(b, 0) res, err := parseLocalDate(b)
if err != nil { if err != nil {
return err return err
} }
@@ -75,9 +75,9 @@ func (d LocalTime) MarshalText() ([]byte, error) {
// UnmarshalText parses b using RFC 3339 to fill d. // UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalTime) UnmarshalText(b []byte) error { func (d *LocalTime) UnmarshalText(b []byte) error {
res, left, err := parseLocalTime(b, 0) res, left, err := parseLocalTime(b)
if err == nil && len(left) != 0 { if err == nil && len(left) != 0 {
err = unstable.NewParserError(left, len(b)-len(left), "extra characters") err = unstable.NewParserError(left, "extra characters")
} }
if err != nil { if err != nil {
return err return err
@@ -109,9 +109,9 @@ func (d LocalDateTime) MarshalText() ([]byte, error) {
// UnmarshalText parses b using RFC 3339 to fill d. // UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalDateTime) UnmarshalText(data []byte) error { func (d *LocalDateTime) UnmarshalText(data []byte) error {
res, left, err := parseLocalDateTime(data, 0) res, left, err := parseLocalDateTime(data)
if err == nil && len(left) != 0 { if err == nil && len(left) != 0 {
err = unstable.NewParserError(left, len(data)-len(left), "extra characters") err = unstable.NewParserError(left, "extra characters")
} }
if err != nil { if err != nil {
return err return err
+7
View File
@@ -67,6 +67,13 @@ func TestLocalTime_UnmarshalMarshalText(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
} }
func TestLocalTime_UnmarshalText_WithoutSeconds(t *testing.T) {
d := toml.LocalTime{}
err := d.UnmarshalText([]byte("14:15"))
assert.NoError(t, err)
assert.Equal(t, toml.LocalTime{14, 15, 0, 0, 0}, d)
}
func TestLocalTime_RoundTrip(t *testing.T) { func TestLocalTime_RoundTrip(t *testing.T) {
var d struct{ A toml.LocalTime } var d struct{ A toml.LocalTime }
err := toml.Unmarshal([]byte("a=20:12:01.500"), &d) err := toml.Unmarshal([]byte("a=20:12:01.500"), &d)
+9 -12
View File
@@ -704,18 +704,15 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte
for iter.Next() { for iter.Next() {
v := iter.Value() v := iter.Value()
// Handle nil values: convert nil pointers to zero value, if isNil(v) {
// skip nil interfaces and nil maps. // For nil pointers, convert to zero value of the element type.
switch v.Kind() { // This allows round-trip marshaling of maps with nil pointer values.
case reflect.Ptr: // For nil interfaces and nil maps, skip since we can't derive a type.
if v.IsNil() { if v.Kind() == reflect.Ptr {
v = reflect.Zero(v.Type().Elem()) v = reflect.Zero(v.Type().Elem())
} } else {
case reflect.Interface, reflect.Map:
if v.IsNil() {
continue continue
} }
default:
} }
k, err := enc.keyToString(iter.Key()) k, err := enc.keyToString(iter.Key())
@@ -939,7 +936,7 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
if shouldOmitEmpty(kv.Options, kv.Value) { if shouldOmitEmpty(kv.Options, kv.Value) {
continue continue
} }
if kv.Options.omitzero && shouldOmitZero(kv.Options, kv.Value) { if shouldOmitZero(kv.Options, kv.Value) {
continue continue
} }
hasNonEmptyKV = true hasNonEmptyKV = true
@@ -961,7 +958,7 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
if shouldOmitEmpty(table.Options, table.Value) { if shouldOmitEmpty(table.Options, table.Value) {
continue continue
} }
if table.Options.omitzero && shouldOmitZero(table.Options, table.Value) { if shouldOmitZero(table.Options, table.Value) {
continue continue
} }
if first { if first {
@@ -998,7 +995,7 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte
if shouldOmitEmpty(kv.Options, kv.Value) { if shouldOmitEmpty(kv.Options, kv.Value) {
continue continue
} }
if kv.Options.omitzero && shouldOmitZero(kv.Options, kv.Value) { if shouldOmitZero(kv.Options, kv.Value) {
continue continue
} }
+6 -8
View File
@@ -54,12 +54,10 @@ func (s *strict) MissingTable(node *unstable.Node) {
return return
} }
loc, offset := s.keyLocation(node)
s.missing = append(s.missing, unstable.ParserError{ s.missing = append(s.missing, unstable.ParserError{
Highlight: loc, Highlight: s.keyLocation(node),
Message: "missing table", Message: "missing table",
Key: s.key.Key(), Key: s.key.Key(),
Offset: offset,
}) })
} }
@@ -68,12 +66,10 @@ func (s *strict) MissingField(node *unstable.Node) {
return return
} }
loc, offset := s.keyLocation(node)
s.missing = append(s.missing, unstable.ParserError{ s.missing = append(s.missing, unstable.ParserError{
Highlight: loc, Highlight: s.keyLocation(node),
Message: "missing field", Message: "missing field",
Key: s.key.Key(), Key: s.key.Key(),
Offset: offset,
}) })
} }
@@ -94,7 +90,7 @@ func (s *strict) Error(doc []byte) error {
return err return err
} }
func (s *strict) keyLocation(node *unstable.Node) ([]byte, int) { func (s *strict) keyLocation(node *unstable.Node) []byte {
k := node.Key() k := node.Key()
hasOne := k.Next() hasOne := k.Next()
@@ -102,6 +98,7 @@ func (s *strict) keyLocation(node *unstable.Node) ([]byte, int) {
panic("should not be called with empty key") panic("should not be called with empty key")
} }
// Get the range from the first key to the last key.
firstRaw := k.Node().Raw firstRaw := k.Node().Raw
lastRaw := firstRaw lastRaw := firstRaw
@@ -109,8 +106,9 @@ func (s *strict) keyLocation(node *unstable.Node) ([]byte, int) {
lastRaw = k.Node().Raw lastRaw = k.Node().Raw
} }
// Compute the slice from the document using the ranges.
start := firstRaw.Offset start := firstRaw.Offset
end := lastRaw.Offset + lastRaw.Length end := lastRaw.Offset + lastRaw.Length
return s.doc[start:end], int(start) return s.doc[start:end]
} }
+4 -5
View File
@@ -9,7 +9,7 @@ YELLOW='\033[1;33m'
BLUE='\033[0;34m' BLUE='\033[0;34m'
NC='\033[0m' # No Color NC='\033[0m' # No Color
# Go versions to test (1.11 through 1.26) # Go versions to test (1.11 through 1.25)
GO_VERSIONS=( GO_VERSIONS=(
"1.11" "1.11"
"1.12" "1.12"
@@ -26,7 +26,6 @@ GO_VERSIONS=(
"1.23" "1.23"
"1.24" "1.24"
"1.25" "1.25"
"1.26"
) )
# Default values # Default values
@@ -65,7 +64,7 @@ EXAMPLES:
$0 # Test all Go versions in parallel $0 # Test all Go versions in parallel
$0 --sequential # Test all Go versions sequentially $0 --sequential # Test all Go versions sequentially
$0 1.21 1.22 1.23 # Test specific versions $0 1.21 1.22 1.23 # Test specific versions
$0 --verbose --output ./results 1.25 1.26 # Verbose output to custom directory $0 --verbose --output ./results 1.24 1.25 # Verbose output to custom directory
EXIT CODES: EXIT CODES:
0 Recent Go versions pass (good compatibility) 0 Recent Go versions pass (good compatibility)
@@ -137,8 +136,8 @@ fi
# Validate Go versions # Validate Go versions
for version in "${GO_VERSIONS[@]}"; do for version in "${GO_VERSIONS[@]}"; do
if ! [[ "$version" =~ ^1\.(1[1-9]|2[0-6])$ ]]; then if ! [[ "$version" =~ ^1\.(1[1-9]|2[0-5])$ ]]; then
log_error "Invalid Go version: $version. Supported versions: 1.11-1.26" log_error "Invalid Go version: $version. Supported versions: 1.11-1.25"
exit 1 exit 1
fi fi
done done
+2 -2
View File
@@ -1,5 +1,5 @@
//go:generate go run github.com/toml-lang/toml-test/cmd/toml-test@v1.6.0 -copy ./tests //go:generate go run github.com/toml-lang/toml-test/v2/cmd/toml-test@v2.1.0 copy -toml 1.1 ./tests
//go:generate go run ./cmd/tomltestgen/main.go -r v1.6.0 -o toml_testgen_test.go //go:generate go run ./cmd/tomltestgen/main.go -r v2.1.0 -o toml_testgen_test.go
package toml_test package toml_test
+1151 -532
View File
File diff suppressed because it is too large Load Diff
+27 -97
View File
@@ -56,18 +56,13 @@ func (d *Decoder) DisallowUnknownFields() *Decoder {
// EnableUnmarshalerInterface allows to enable unmarshaler interface. // EnableUnmarshalerInterface allows to enable unmarshaler interface.
// //
// With this feature enabled, types implementing the unstable.Unmarshaler // With this feature enabled, types implementing the unstable/Unmarshaler
// interface can be decoded from any structure of the document. It allows types // interface can be decoded from any structure of the document. It allows types
// that don't have a straightforward TOML representation to provide their own // that don't have a straightforward TOML representation to provide their own
// decoding logic. // decoding logic.
// //
// The UnmarshalTOML method receives raw TOML bytes: // Currently, types can only decode from a single value. Tables and array tables
// - For single values: the raw value bytes (e.g., `"hello"` for a string) // are not supported.
// - For tables: all key-value lines belonging to that table
// - For inline tables/arrays: the raw bytes of the inline structure
//
// The unstable.RawMessage type can be used to capture raw TOML bytes for
// later processing, similar to json.RawMessage.
// //
// *Unstable:* This method does not follow the compatibility guarantees of // *Unstable:* This method does not follow the compatibility guarantees of
// semver. It can be changed or removed without a new major version being // semver. It can be changed or removed without a new major version being
@@ -604,8 +599,9 @@ func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (
// cannot handle it. // cannot handle it.
func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
if v.Kind() == reflect.Slice { if v.Kind() == reflect.Slice {
// For non-empty slices, work with the last element if v.Len() == 0 {
if v.Len() > 0 { return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice")
}
elem := v.Index(v.Len() - 1) elem := v.Index(v.Len() - 1)
x, err := d.handleTable(key, elem) x, err := d.handleTable(key, elem)
if err != nil { if err != nil {
@@ -616,17 +612,6 @@ func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.V
} }
return reflect.Value{}, nil return reflect.Value{}, nil
} }
// Empty slice - check if it implements Unmarshaler (e.g., RawMessage)
// and we're at the end of the key path
if d.unmarshalerInterface && !key.Next() {
if v.CanAddr() && v.Addr().CanInterface() {
if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
return d.handleKeyValuesUnmarshaler(outi)
}
}
}
return reflect.Value{}, unstable.NewParserError(key.Node().Data, int(key.Node().Raw.Offset), "cannot store a table in a slice")
}
if key.Next() { if key.Next() {
// Still scoping the key // Still scoping the key
return d.handleTablePart(key, v) return d.handleTablePart(key, v)
@@ -639,24 +624,6 @@ func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.V
// Handle root expressions until the end of the document or the next // Handle root expressions until the end of the document or the next
// non-key-value. // non-key-value.
func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
// Check if target implements Unmarshaler before processing key-values.
// This allows types to handle entire tables themselves.
if d.unmarshalerInterface {
vv := v
for vv.Kind() == reflect.Ptr {
if vv.IsNil() {
vv.Set(reflect.New(vv.Type().Elem()))
}
vv = vv.Elem()
}
if vv.CanAddr() && vv.Addr().CanInterface() {
if outi, ok := vv.Addr().Interface().(unstable.Unmarshaler); ok {
// Collect all key-value expressions for this table
return d.handleKeyValuesUnmarshaler(outi)
}
}
}
var rv reflect.Value var rv reflect.Value
for d.nextExpr() { for d.nextExpr() {
expr := d.expr() expr := d.expr()
@@ -686,41 +653,6 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
return rv, nil return rv, nil
} }
// handleKeyValuesUnmarshaler collects all key-value expressions for a table
// and passes them to the Unmarshaler as raw TOML bytes.
func (d *decoder) handleKeyValuesUnmarshaler(u unstable.Unmarshaler) (reflect.Value, error) {
// Collect raw bytes from all key-value expressions for this table.
// We use the Raw field on each KeyValue expression to preserve the
// original formatting (whitespace, quoting style, etc.) from the document.
var buf []byte
for d.nextExpr() {
expr := d.expr()
if expr.Kind != unstable.KeyValue {
d.stashExpr()
break
}
_, err := d.seen.CheckExpression(expr)
if err != nil {
return reflect.Value{}, err
}
// Use the raw bytes from the original document to preserve formatting
if expr.Raw.Length > 0 {
raw := d.p.Raw(expr.Raw)
buf = append(buf, raw...)
}
buf = append(buf, '\n')
}
if err := u.UnmarshalTOML(buf); err != nil {
return reflect.Value{}, err
}
return reflect.Value{}, nil
}
type ( type (
handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error)
valueMakerFn func() reflect.Value valueMakerFn func() reflect.Value
@@ -748,7 +680,7 @@ func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool
if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
if err != nil { if err != nil {
return false, unstable.NewParserError(d.p.Raw(node.Raw), int(node.Raw.Offset), "%w", err) return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err)
} }
return true, nil return true, nil
@@ -765,8 +697,7 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
if d.unmarshalerInterface { if d.unmarshalerInterface {
if v.CanAddr() && v.Addr().CanInterface() { if v.CanAddr() && v.Addr().CanInterface() {
if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
// Pass raw bytes from the original document return outi.UnmarshalTOML(value)
return outi.UnmarshalTOML(d.p.Raw(value.Raw))
} }
} }
} }
@@ -896,7 +827,7 @@ func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) e
} }
return d.unmarshalInlineTable(itable, elem) return d.unmarshalInlineTable(itable, elem)
default: default:
return unstable.NewParserError(d.p.Raw(itable.Raw), int(itable.Raw.Offset), "cannot store inline table in Go type %s", v.Kind()) return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind())
} }
it := itable.Children() it := itable.Children()
@@ -916,26 +847,26 @@ func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) e
} }
func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error {
dt, err := parseDateTime(value.Data, int(value.Raw.Offset)) dt, err := parseDateTime(value.Data)
if err != nil { if err != nil {
return err return err
} }
if v.Kind() != reflect.Interface && v.Type() != timeType { if v.Kind() != reflect.Interface && v.Type() != timeType {
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("datetime", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("datetime", v.Type()))
} }
v.Set(reflect.ValueOf(dt)) v.Set(reflect.ValueOf(dt))
return nil return nil
} }
func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error {
ld, err := parseLocalDate(value.Data, int(value.Raw.Offset)) ld, err := parseLocalDate(value.Data)
if err != nil { if err != nil {
return err return err
} }
if v.Kind() != reflect.Interface && v.Type() != timeType { if v.Kind() != reflect.Interface && v.Type() != timeType {
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("local date", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local date", v.Type()))
} }
if v.Type() == timeType { if v.Type() == timeType {
v.Set(reflect.ValueOf(ld.AsTime(time.Local))) v.Set(reflect.ValueOf(ld.AsTime(time.Local)))
@@ -946,34 +877,34 @@ func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) erro
} }
func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error {
lt, rest, err := parseLocalTime(value.Data, int(value.Raw.Offset)) lt, rest, err := parseLocalTime(value.Data)
if err != nil { if err != nil {
return err return err
} }
if len(rest) > 0 { if len(rest) > 0 {
return unstable.NewParserError(rest, int(value.Raw.Offset)+len(value.Data)-len(rest), "extra characters at the end of a local time") return unstable.NewParserError(rest, "extra characters at the end of a local time")
} }
if v.Kind() != reflect.Interface { if v.Kind() != reflect.Interface {
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("local time", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local time", v.Type()))
} }
v.Set(reflect.ValueOf(lt)) v.Set(reflect.ValueOf(lt))
return nil return nil
} }
func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error {
ldt, rest, err := parseLocalDateTime(value.Data, int(value.Raw.Offset)) ldt, rest, err := parseLocalDateTime(value.Data)
if err != nil { if err != nil {
return err return err
} }
if len(rest) > 0 { if len(rest) > 0 {
return unstable.NewParserError(rest, int(value.Raw.Offset)+len(value.Data)-len(rest), "extra characters at the end of a local date time") return unstable.NewParserError(rest, "extra characters at the end of a local date time")
} }
if v.Kind() != reflect.Interface && v.Type() != timeType { if v.Kind() != reflect.Interface && v.Type() != timeType {
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("local datetime", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("local datetime", v.Type()))
} }
if v.Type() == timeType { if v.Type() == timeType {
v.Set(reflect.ValueOf(ldt.AsTime(time.Local))) v.Set(reflect.ValueOf(ldt.AsTime(time.Local)))
@@ -992,14 +923,14 @@ func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error {
case reflect.Interface: case reflect.Interface:
v.Set(reflect.ValueOf(b)) v.Set(reflect.ValueOf(b))
default: default:
return unstable.NewParserError(value.Data, int(value.Raw.Offset), "cannot assign boolean to a %t", b) return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b)
} }
return nil return nil
} }
func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error {
f, err := parseFloat(value.Data, int(value.Raw.Offset)) f, err := parseFloat(value.Data)
if err != nil { if err != nil {
return err return err
} }
@@ -1009,13 +940,13 @@ func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error {
v.SetFloat(f) v.SetFloat(f)
case reflect.Float32: case reflect.Float32:
if f > math.MaxFloat32 { if f > math.MaxFloat32 {
return unstable.NewParserError(value.Data, int(value.Raw.Offset), "number %f does not fit in a float32", f) return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f)
} }
v.SetFloat(f) v.SetFloat(f)
case reflect.Interface: case reflect.Interface:
v.Set(reflect.ValueOf(f)) v.Set(reflect.ValueOf(f))
default: default:
return unstable.NewParserError(value.Data, int(value.Raw.Offset), "float cannot be assigned to %s", v.Kind()) return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind())
} }
return nil return nil
@@ -1048,7 +979,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error
return d.unmarshalFloat(value, v) return d.unmarshalFloat(value, v)
} }
i, err := parseInteger(value.Data, int(value.Raw.Offset)) i, err := parseInteger(value.Data)
if err != nil { if err != nil {
return err return err
} }
@@ -1116,7 +1047,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error
case reflect.Interface: case reflect.Interface:
r = reflect.ValueOf(i) r = reflect.ValueOf(i)
default: default:
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("integer", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("integer", v.Type()))
} }
if !r.Type().AssignableTo(v.Type()) { if !r.Type().AssignableTo(v.Type()) {
@@ -1135,7 +1066,7 @@ func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
case reflect.Interface: case reflect.Interface:
v.Set(reflect.ValueOf(string(value.Data))) v.Set(reflect.ValueOf(string(value.Data)))
default: default:
return unstable.NewParserError(d.p.Raw(value.Raw), int(value.Raw.Offset), "%s", d.typeMismatchString("string", v.Type())) return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("string", v.Type()))
} }
return nil return nil
@@ -1270,8 +1201,7 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node
if d.unmarshalerInterface { if d.unmarshalerInterface {
if v.CanAddr() && v.Addr().CanInterface() { if v.CanAddr() && v.Addr().CanInterface() {
if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
// Pass raw bytes from the original document return reflect.Value{}, outi.UnmarshalTOML(value)
return reflect.Value{}, outi.UnmarshalTOML(d.p.Raw(value.Raw))
} }
} }
} }
+445 -356
View File
@@ -96,132 +96,6 @@ func ExampleUnmarshal() {
// tags: [go toml] // tags: [go toml]
} }
// pluginConfig demonstrates how to implement dynamic unmarshaling
// based on a "type" field. This pattern is useful for plugin systems
// or polymorphic configuration.
type pluginConfig struct {
Type string
Config any
}
func (p *pluginConfig) UnmarshalTOML(data []byte) error {
// First, decode just the type field
var typeOnly struct {
Type string `toml:"type"`
}
if err := toml.Unmarshal(data, &typeOnly); err != nil {
return err
}
p.Type = typeOnly.Type
// Now decode the config based on the type
switch typeOnly.Type {
case "database":
var cfg struct {
Type string `toml:"type"`
Host string `toml:"host"`
Port int `toml:"port"`
}
if err := toml.Unmarshal(data, &cfg); err != nil {
return err
}
p.Config = map[string]any{"host": cfg.Host, "port": cfg.Port}
case "cache":
var cfg struct {
Type string `toml:"type"`
TTL int `toml:"ttl"`
}
if err := toml.Unmarshal(data, &cfg); err != nil {
return err
}
p.Config = map[string]any{"ttl": cfg.TTL}
}
return nil
}
// This example demonstrates dynamic unmarshaling based on a discriminator
// field. The pluginConfig type uses UnmarshalTOML to first read the "type"
// field, then decode the rest of the configuration based on that type.
// This pattern is useful for plugin systems or configuration that varies
// by type.
func ExampleDecoder_EnableUnmarshalerInterface_dynamicConfig() {
doc := `
[[plugins]]
type = "database"
host = "localhost"
port = 5432
[[plugins]]
type = "cache"
ttl = 300
`
type Config struct {
Plugins []pluginConfig `toml:"plugins"`
}
var cfg Config
err := toml.NewDecoder(strings.NewReader(doc)).
EnableUnmarshalerInterface().
Decode(&cfg)
if err != nil {
panic(err)
}
for _, p := range cfg.Plugins {
fmt.Printf("type=%s config=%v\n", p.Type, p.Config)
}
// Output:
// type=database config=map[host:localhost port:5432]
// type=cache config=map[ttl:300]
}
// This example demonstrates using RawMessage to capture raw TOML bytes
// for later processing. Like json.RawMessage, it defers decoding: the
// captured bytes can be inspected first and then unmarshaled into a
// concrete type chosen from context.
func ExampleDecoder_EnableUnmarshalerInterface_rawMessage() {
	doc := `
[plugin]
name = "example"
version = "1.0"
enabled = true
`

	type Config struct {
		Plugin unstable.RawMessage `toml:"plugin"`
	}

	var cfg Config
	dec := toml.NewDecoder(strings.NewReader(doc))
	dec.EnableUnmarshalerInterface()
	if err := dec.Decode(&cfg); err != nil {
		panic(err)
	}

	// At this point cfg.Plugin holds the raw, undecoded TOML bytes.
	fmt.Printf("Raw TOML captured:\n%s", cfg.Plugin)

	// The captured bytes can later be decoded into a concrete type.
	var plugin struct {
		Name    string `toml:"name"`
		Version string `toml:"version"`
		Enabled bool   `toml:"enabled"`
	}
	if err := toml.Unmarshal(cfg.Plugin, &plugin); err != nil {
		panic(err)
	}
	fmt.Printf("Decoded: name=%s version=%s enabled=%v\n",
		plugin.Name, plugin.Version, plugin.Enabled)

	// Output:
	// Raw TOML captured:
	// name = "example"
	// version = "1.0"
	// enabled = true
	// Decoded: name=example version=1.0 enabled=true
}
type badReader struct{} type badReader struct{}
func (r *badReader) Read([]byte) (int, error) { func (r *badReader) Read([]byte) (int, error) {
@@ -726,6 +600,96 @@ foo = "bar"`,
} }
}, },
}, },
{
desc: "local-time without seconds",
input: `a = 14:15`,
gen: func() test {
var v map[string]interface{}
return test{
target: &v,
expected: &map[string]interface{}{
"a": toml.LocalTime{Hour: 14, Minute: 15},
},
}
},
},
{
desc: "local-datetime without seconds using T",
input: `a = 2010-02-03T14:15`,
gen: func() test {
var v map[string]interface{}
return test{
target: &v,
expected: &map[string]interface{}{
"a": toml.LocalDateTime{
LocalDate: toml.LocalDate{2010, 2, 3},
LocalTime: toml.LocalTime{Hour: 14, Minute: 15},
},
},
}
},
},
{
desc: "local-datetime without seconds using space",
input: `a = 2010-02-03 14:15`,
gen: func() test {
var v map[string]interface{}
return test{
target: &v,
expected: &map[string]interface{}{
"a": toml.LocalDateTime{
LocalDate: toml.LocalDate{2010, 2, 3},
LocalTime: toml.LocalTime{Hour: 14, Minute: 15},
},
},
}
},
},
{
desc: "datetime without seconds with Z",
input: `a = 2010-02-03T14:15Z`,
gen: func() test {
var v map[string]time.Time
return test{
target: &v,
expected: &map[string]time.Time{
"a": time.Date(2010, 2, 3, 14, 15, 0, 0, time.UTC),
},
}
},
},
{
desc: "datetime without seconds with offset",
input: `a = 2010-02-03T14:15+05:00`,
gen: func() test {
var v map[string]time.Time
return test{
target: &v,
expected: &map[string]time.Time{
"a": time.Date(2010, 2, 3, 14, 15, 0, 0, time.FixedZone("", 5*3600)),
},
}
},
},
{
desc: "local-time with seconds and fractional regression",
input: `a = 14:15:30.123`,
gen: func() test {
var v map[string]interface{}
return test{
target: &v,
expected: &map[string]interface{}{
"a": toml.LocalTime{Hour: 14, Minute: 15, Second: 30, Nanosecond: 123000000, Precision: 3},
},
}
},
},
{ {
desc: "local-time missing digit", desc: "local-time missing digit",
input: `a = 12:08:0`, input: `a = 12:08:0`,
@@ -885,6 +849,104 @@ huey = 'dewey'
} }
}, },
}, },
{
desc: "basic string escape character",
input: `A = "\e"`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "\x1B"},
}
},
},
{
desc: "multiline basic string escape character",
input: `A = """\e"""`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "\x1B"},
}
},
},
{
desc: "escape character combined with bracket",
input: `A = "\e["`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "\x1B["},
}
},
},
{
desc: "basic string hex escape lowercase letter",
input: `A = "\x61"`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "a"},
}
},
},
{
desc: "basic string hex escape null byte",
input: `A = "\x00"`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "\x00"},
}
},
},
{
desc: "basic string hex escape max value",
input: `A = "\xFF"`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "\u00FF"},
}
},
},
{
desc: "multiline basic string hex escape",
input: `A = """\x61"""`,
gen: func() test {
type doc struct {
A string
}
return test{
target: &doc{},
expected: &doc{A: "a"},
}
},
},
{ {
desc: "spaces around dotted keys", desc: "spaces around dotted keys",
input: "a . b = 1", input: "a . b = 1",
@@ -1032,6 +1094,87 @@ B = "data"`,
} }
}, },
}, },
{
desc: "multiline inline table",
input: "Name = {\n First = \"hello\",\n Last = \"world\"\n}",
gen: func() test {
type name struct {
First string
Last string
}
type doc struct {
Name name
}
return test{
target: &doc{},
expected: &doc{Name: name{
First: "hello",
Last: "world",
}},
}
},
},
{
desc: "inline table with trailing comma",
input: `Name = {First = "hello", Last = "world",}`,
gen: func() test {
type name struct {
First string
Last string
}
type doc struct {
Name name
}
return test{
target: &doc{},
expected: &doc{Name: name{
First: "hello",
Last: "world",
}},
}
},
},
{
desc: "multiline inline table with trailing comma and comments",
input: "Name = {\n # first name\n First = \"hello\",\n # last name\n Last = \"world\",\n}",
gen: func() test {
type name struct {
First string
Last string
}
type doc struct {
Name name
}
return test{
target: &doc{},
expected: &doc{Name: name{
First: "hello",
Last: "world",
}},
}
},
},
{
desc: "nested multiline inline tables",
input: "A = {\n B = {\n C = 1,\n },\n}",
gen: func() test {
var v map[string]interface{}
return test{
target: &v,
expected: &map[string]interface{}{
"A": map[string]interface{}{
"B": map[string]interface{}{
"C": int64(1),
},
},
},
}
},
},
{ {
desc: "inline table inside array", desc: "inline table inside array",
input: `Names = [{First = "hello", Last = "world"}, {First = "ab", Last = "cd"}]`, input: `Names = [{First = "hello", Last = "world"}, {First = "ab", Last = "cd"}]`,
@@ -3271,7 +3414,7 @@ world'`,
{ {
desc: "bad char between minutes and seconds", desc: "bad char between minutes and seconds",
data: `a = 2021-03-30 21:312:0`, data: `a = 2021-03-30 21:312:0`,
msg: `expecting colon between minutes and seconds`, msg: `extra characters at the end of a local date time`,
}, },
{ {
desc: "invalid hour value", desc: "invalid hour value",
@@ -3386,6 +3529,18 @@ world'`,
desc: `invalid escape char basic multiline string`, desc: `invalid escape char basic multiline string`,
data: `A = """\z"""`, data: `A = """\z"""`,
}, },
{
desc: `invalid hex escape non-hex character in basic string`,
data: `A = "\xGG"`,
},
{
desc: `incomplete hex escape in basic string`,
data: `A = "\x6"`,
},
{
desc: `invalid hex escape non-hex character in multiline basic string`,
data: `A = """\xGG"""`,
},
{ {
desc: `invalid inf`, desc: `invalid inf`,
data: `A = ick`, data: `A = ick`,
@@ -3572,6 +3727,30 @@ world'`,
desc: `backspace in comment`, desc: `backspace in comment`,
data: "# this is a test\ba=1", data: "# this is a test\ba=1",
}, },
{
desc: `inline table comma at start`,
data: `a = { , b = 1 }`,
},
{
desc: `inline table missing separator`,
data: `a = { b = 1 c = 2 }`,
},
{
desc: `inline table double comma across newline`,
data: "a = { b = 1,\n, c = 2 }",
},
{
desc: `incomplete inline table`,
data: "a = { b = 1,\n",
},
{
desc: `incomplete hex escape in multiline basic string`,
data: `A = """\x6"""`,
},
{
desc: `invalid escape char in basic string`,
data: `A = "\z"`,
},
} }
for _, e := range examples { for _, e := range examples {
@@ -4026,8 +4205,8 @@ type CustomUnmarshalerKey struct {
A int64 A int64
} }
func (k *CustomUnmarshalerKey) UnmarshalTOML(data []byte) error { func (k *CustomUnmarshalerKey) UnmarshalTOML(value *unstable.Node) error {
item, err := strconv.ParseInt(string(data), 10, 64) item, err := strconv.ParseInt(string(value.Data), 10, 64)
if err != nil { if err != nil {
return fmt.Errorf("error converting to int64, %w", err) return fmt.Errorf("error converting to int64, %w", err)
} }
@@ -4115,7 +4294,7 @@ foo = "bar"`,
type doc994 struct{} type doc994 struct{}
func (d *doc994) UnmarshalTOML([]byte) error { func (d *doc994) UnmarshalTOML(*unstable.Node) error {
return errors.New("expected-error") return errors.New("expected-error")
} }
@@ -4138,8 +4317,8 @@ type doc994ok struct {
S string S string
} }
func (d *doc994ok) UnmarshalTOML(data []byte) error { func (d *doc994ok) UnmarshalTOML(value *unstable.Node) error {
d.S = string(data) + " from unmarshaler" d.S = string(value.Data) + " from unmarshaler"
return nil return nil
} }
@@ -4152,8 +4331,7 @@ func TestIssue994_OK(t *testing.T) {
Decode(&d) Decode(&d)
assert.NoError(t, err) assert.NoError(t, err)
// With bytes-based interface, raw TOML bytes are passed including quotes assert.Equal(t, "bar from unmarshaler", d.S)
assert.Equal(t, "\"bar\" from unmarshaler", d.S)
} }
func TestIssue995(t *testing.T) { func TestIssue995(t *testing.T) {
@@ -4513,264 +4691,175 @@ func TestIssue1028(t *testing.T) {
}) })
} }
// Tests for issue #873 - Bring back toml.Unmarshaler for tables and arrays // customFieldUnmarshaler implements unstable.Unmarshaler and captures all
// key-value pairs directed to it, including unknown fields.
type customTable873 struct { type customFieldUnmarshaler struct {
Keys []string
Values map[string]string Values map[string]string
} }
func (c *customTable873) UnmarshalTOML(data []byte) error { func (c *customFieldUnmarshaler) UnmarshalTOML(value *unstable.Node) error {
c.Keys = []string{} c.Values = map[string]string{
c.Values = make(map[string]string) "kind": value.Kind.String(),
"data": string(value.Data),
// Parse the raw TOML bytes into a map to extract keys in order
// For this test, we use a simple line-by-line parser to preserve order
lines := bytes.Split(data, []byte{'\n'})
for _, line := range lines {
line = bytes.TrimSpace(line)
if len(line) == 0 {
continue
} }
// Skip table headers
if line[0] == '[' {
continue
}
// Parse key = value
eqIdx := bytes.Index(line, []byte{'='})
if eqIdx < 0 {
continue
}
key := string(bytes.TrimSpace(line[:eqIdx]))
// Remove quotes from quoted keys
if len(key) >= 2 && key[0] == '"' && key[len(key)-1] == '"' {
key = key[1 : len(key)-1]
}
valueBytes := bytes.TrimSpace(line[eqIdx+1:])
// Remove quotes from string values
if len(valueBytes) >= 2 && valueBytes[0] == '"' && valueBytes[len(valueBytes)-1] == '"' {
valueBytes = valueBytes[1 : len(valueBytes)-1]
}
c.Keys = append(c.Keys, key)
c.Values[key] = string(valueBytes)
}
return nil return nil
} }
// Test for split tables - when the same parent table is defined in multiple places func TestUnmarshalerInterface_StructFieldFallback(t *testing.T) {
// This is a key requirement for issue #873: if type A implements Unmarshaler, // When EnableUnmarshalerInterface is active and a struct field is not found,
// and [a.b] and [a.d] are defined with another table [x] in between, // the decoder should fall back to the Unmarshaler interface on the struct.
// A should receive content for both b and d, but not x.
func TestIssue873_SplitTables(t *testing.T) {
// For this test, we expect each sub-table to be handled separately
// The parent doesn't receive the sub-tables directly - each sub-table
// (b and d) gets its own call to handleKeyValues
type Config struct { type Config struct {
A struct { Name string `toml:"name"`
B customTable873 `toml:"b"`
D customTable873 `toml:"d"`
} `toml:"a"`
X customTable873 `toml:"x"`
} }
doc := ` t.Run("unknown field with unmarshaler", func(t *testing.T) {
[a.b] doc := `name = "hello"
C = "1" unknown = "world"`
[x]
Y = "100"
[a.d]
E = "2"
`
var cfg Config var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))). decoder := toml.NewDecoder(bytes.NewReader([]byte(doc)))
EnableUnmarshalerInterface(). decoder.EnableUnmarshalerInterface()
Decode(&cfg) err := decoder.Decode(&cfg)
assert.NoError(t, err) assert.NoError(t, err)
// Each sub-table should have received its own key-values assert.Equal(t, "hello", cfg.Name)
assert.Equal(t, []string{"C"}, cfg.A.B.Keys) })
assert.Equal(t, "1", cfg.A.B.Values["C"])
assert.Equal(t, []string{"E"}, cfg.A.D.Keys)
assert.Equal(t, "2", cfg.A.D.Values["E"])
assert.Equal(t, []string{"Y"}, cfg.X.Keys)
assert.Equal(t, "100", cfg.X.Values["Y"])
} }
// Test using RawMessage to capture raw TOML bytes func TestUnmarshalerInterface_Value(t *testing.T) {
func TestIssue873_RawMessage(t *testing.T) { // Test that EnableUnmarshalerInterface delegates value decoding
// to the UnmarshalTOML method.
type Config struct { type Config struct {
Plugin unstable.RawMessage `toml:"plugin"` Field customFieldUnmarshaler `toml:"field"`
} }
doc := ` doc := `field = "test-value"`
[plugin]
name = "example"
version = "1.0"
`
var cfg Config var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))). decoder := toml.NewDecoder(bytes.NewReader([]byte(doc)))
EnableUnmarshalerInterface(). decoder.EnableUnmarshalerInterface()
Decode(&cfg) err := decoder.Decode(&cfg)
assert.NoError(t, err) assert.NoError(t, err)
// RawMessage should contain the raw key-value bytes assert.Equal(t, "test-value", cfg.Field.Values["data"])
expected := "name = \"example\"\nversion = \"1.0\"\n"
assert.Equal(t, expected, string(cfg.Plugin))
} }
// Test keys that need quoting (contain special characters) func TestTypeMismatchString_StructFieldContext(t *testing.T) {
func TestIssue873_QuotedKeys(t *testing.T) { // Exercise the typeMismatchString code path that includes struct field info
// in the error message.
type Inner struct {
Value int `toml:"value"`
}
type Config struct { type Config struct {
Section customTable873 `toml:"section"` Inner Inner `toml:"inner"`
} }
doc := ` doc := `inner = "not-a-table"`
[section]
"key with spaces" = "value1"
"key.with.dots" = "value2"
`
var cfg Config var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))). err := toml.Unmarshal([]byte(doc), &cfg)
EnableUnmarshalerInterface().
Decode(&cfg)
assert.NoError(t, err)
assert.Equal(t, 2, len(cfg.Section.Keys))
assert.Equal(t, "value1", cfg.Section.Values["key with spaces"])
assert.Equal(t, "value2", cfg.Section.Values["key.with.dots"])
}
// errorUnmarshaler873 is used to test error propagation from UnmarshalTOML
type errorUnmarshaler873 struct{}
func (e *errorUnmarshaler873) UnmarshalTOML([]byte) error {
return errors.New("intentional error")
}
// Test error propagation from UnmarshalTOML
func TestIssue873_UnmarshalerError(t *testing.T) {
doc := `
[section]
key = "value"
`
type Config struct {
Section errorUnmarshaler873 `toml:"section"`
}
var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))).
EnableUnmarshalerInterface().
Decode(&cfg)
assert.Error(t, err) assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "intentional error"))
} }
// Test dotted keys in a table (e.g., a.b = value) func TestUnmarshalInlineTable_IncompatibleType(t *testing.T) {
func TestIssue873_DottedKeys(t *testing.T) { // Exercise the default branch of unmarshalInlineTable when the target
type Config struct { // is not a map, struct, or interface.
Section customTable873 `toml:"section"` type doc struct {
A int `toml:"a"`
}
var v doc
err := toml.Unmarshal([]byte(`a = {b = 1}`), &v)
assert.Error(t, err)
} }
doc := ` func TestTypeMismatchString_NoStructContext(t *testing.T) {
[section] // Exercise the typeMismatchString code path without struct field context (line 186).
sub.key = "value1" // Decoding a string into a bare int triggers this path.
another.nested.key = "value2" var v map[string]int
` err := toml.Unmarshal([]byte(`a = "hello"`), &v)
assert.Error(t, err)
var cfg Config }
err := toml.NewDecoder(bytes.NewReader([]byte(doc))).
EnableUnmarshalerInterface().
Decode(&cfg)
func TestMultilineInlineTable_EmptyWithNewlines(t *testing.T) {
doc := "a = {\n\n}"
var v map[string]interface{}
err := toml.Unmarshal([]byte(doc), &v)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 2, len(cfg.Section.Keys)) inner := v["a"]
// The dotted keys should be preserved in the raw output if inner == nil {
assert.Equal(t, "value1", cfg.Section.Values["sub.key"]) t.Fatal("expected key 'a' to be present")
assert.Equal(t, "value2", cfg.Section.Values["another.nested.key"]) }
m, ok := inner.(map[string]interface{})
if !ok {
t.Fatalf("expected map, got %T", inner)
}
if len(m) != 0 {
t.Fatalf("expected empty map, got %v", m)
}
} }
// Test pointer to pointer to Unmarshaler (covers pointer dereferencing loop) func TestMultilineInlineTable_CommentsOnly(t *testing.T) {
func TestIssue873_DoublePointerUnmarshaler(t *testing.T) { doc := "a = {\n # just a comment\n}"
type Config struct { var v map[string]interface{}
Section **customTable873 `toml:"section"` err := toml.Unmarshal([]byte(doc), &v)
}
doc := `
[section]
key = "value"
`
var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))).
EnableUnmarshalerInterface().
Decode(&cfg)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, cfg.Section != nil) inner := v["a"]
assert.True(t, *cfg.Section != nil) if inner == nil {
assert.Equal(t, []string{"key"}, (*cfg.Section).Keys) t.Fatal("expected key 'a' to be present")
assert.Equal(t, "value", (*cfg.Section).Values["key"]) }
m, ok := inner.(map[string]interface{})
if !ok {
t.Fatalf("expected map, got %T", inner)
}
if len(m) != 0 {
t.Fatalf("expected empty map, got %v", m)
}
} }
// formattingCapture captures the raw TOML bytes to verify formatting preservation func TestMultilineInlineTable_CommentAfterComma(t *testing.T) {
type formattingCapture struct { // Exercises comment handling after comma in inline table (parser lines 518-524).
RawBytes string doc := "a = { b = 1, # comment\nc = 2 }"
} var v map[string]interface{}
err := toml.Unmarshal([]byte(doc), &v)
func (f *formattingCapture) UnmarshalTOML(data []byte) error {
f.RawBytes = string(data)
return nil
}
func TestIssue873_FormattingPreservation(t *testing.T) {
type Config struct {
Section *formattingCapture `toml:"section"`
}
// Test that various formatting styles are preserved:
// - Extra spaces around '='
// - Literal strings (single quotes)
// - Hex numbers
// - Inline tables
doc := `[section]
key1 = "value with spaces"
key2 = 'literal string'
hex_val = 0xDEADBEEF
inline = { a = 1, b = 2 }
`
var cfg Config
err := toml.NewDecoder(bytes.NewReader([]byte(doc))).
EnableUnmarshalerInterface().
Decode(&cfg)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, cfg.Section != nil) m, ok := v["a"].(map[string]interface{})
if !ok {
// The raw bytes should preserve original formatting t.Fatal("expected a map")
raw := cfg.Section.RawBytes }
if m["b"] != int64(1) {
// Check that extra spaces around '=' are preserved t.Fatalf("expected b=1, got %v", m["b"])
assert.True(t, strings.Contains(raw, "key1 = \"value with spaces\""), }
"Expected spacing to be preserved, got: %s", raw) if m["c"] != int64(2) {
t.Fatalf("expected c=2, got %v", m["c"])
// Check that literal string style is preserved }
assert.True(t, strings.Contains(raw, "key2 = 'literal string'"), }
"Expected literal string to be preserved, got: %s", raw)
func TestMultilineInlineTable_CommentAfterValue(t *testing.T) {
// Check that hex format is preserved // Exercises comment handling after keyval in inline table (parser lines 542-548).
assert.True(t, strings.Contains(raw, "hex_val = 0xDEADBEEF"), doc := "a = { b = 1 # comment\n, c = 2 }"
"Expected hex format to be preserved, got: %s", raw) var v map[string]interface{}
err := toml.Unmarshal([]byte(doc), &v)
// Check that inline table is preserved assert.NoError(t, err)
assert.True(t, strings.Contains(raw, "inline = { a = 1, b = 2 }"), m, ok := v["a"].(map[string]interface{})
"Expected inline table to be preserved, got: %s", raw) if !ok {
t.Fatal("expected a map")
}
if m["b"] != int64(1) {
t.Fatalf("expected b=1, got %v", m["b"])
}
if m["c"] != int64(2) {
t.Fatalf("expected c=2, got %v", m["c"])
}
}
func TestMultilineInlineTable_LeadingComma(t *testing.T) {
doc := "a = { b = 1\n, c = 2 }"
var v map[string]interface{}
err := toml.Unmarshal([]byte(doc), &v)
assert.NoError(t, err)
m, ok := v["a"].(map[string]interface{})
if !ok {
t.Fatal("expected a map")
}
if m["b"] != int64(1) {
t.Fatalf("expected b=1, got %v", m["b"])
}
if m["c"] != int64(2) {
t.Fatalf("expected c=2, got %v", m["c"])
}
} }
+3 -7
View File
@@ -28,16 +28,12 @@ func (c *Iterator) Next() bool {
if c.nodes == nil { if c.nodes == nil {
return false return false
} }
nodes := *c.nodes
if !c.started { if !c.started {
c.started = true c.started = true
} else { } else if c.idx >= 0 {
idx := c.idx c.idx = (*c.nodes)[c.idx].next
if idx >= 0 && int(idx) < len(nodes) {
c.idx = nodes[idx].next
} }
} return c.idx >= 0 && int(c.idx) < len(*c.nodes)
return c.idx >= 0 && int(c.idx) < len(nodes)
} }
// IsLast returns true if the current node of the iterator is the last // IsLast returns true if the current node of the iterator is the last
+1 -1
View File
@@ -35,7 +35,7 @@ func BenchmarkScanComments(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, _, _ = scanComment(input, 0) _, _, _ = scanComment(input)
} }
}) })
} }
+129 -78
View File
@@ -16,7 +16,6 @@ type ParserError struct {
Highlight []byte Highlight []byte
Message string Message string
Key []string // optional Key []string // optional
Offset int
} }
// Error is the implementation of the error interface. // Error is the implementation of the error interface.
@@ -28,10 +27,9 @@ func (e *ParserError) Error() string {
// //
// Warning: Highlight needs to be a subslice of Parser.data, so only slices // Warning: Highlight needs to be a subslice of Parser.data, so only slices
// returned by Parser.Raw are valid candidates. // returned by Parser.Raw are valid candidates.
func NewParserError(highlight []byte, offset int, format string, args ...interface{}) error { func NewParserError(highlight []byte, format string, args ...interface{}) error {
return &ParserError{ return &ParserError{
Highlight: highlight, Highlight: highlight,
Offset: offset,
Message: fmt.Errorf(format, args...).Error(), Message: fmt.Errorf(format, args...).Error(),
} }
} }
@@ -66,8 +64,14 @@ func (p *Parser) Data() []byte {
return p.data return p.data
} }
func (p *Parser) offsetOf(b []byte) int { // Range returns a range description that corresponds to a given slice of the
return len(p.data) - len(b) // input. If the argument is not a subslice of the parser input, this function
// panics.
func (p *Parser) Range(b []byte) Range {
return Range{
Offset: uint32(p.subsliceOffset(b)), //nolint:gosec // TOML documents are small
Length: uint32(len(b)), //nolint:gosec // TOML documents are small
}
} }
// rangeOfToken computes the Range of a token given the remaining bytes after the token. // rangeOfToken computes the Range of a token given the remaining bytes after the token.
@@ -78,6 +82,13 @@ func (p *Parser) rangeOfToken(token, rest []byte) Range {
return Range{Offset: uint32(offset), Length: uint32(len(token))} //nolint:gosec // TOML documents are small return Range{Offset: uint32(offset), Length: uint32(len(token))} //nolint:gosec // TOML documents are small
} }
// subsliceOffset returns the byte offset of subslice b within p.data.
// b must be a suffix (tail) of p.data.
func (p *Parser) subsliceOffset(b []byte) int {
// b is a suffix of p.data, so its offset is len(p.data) - len(b)
return len(p.data) - len(b)
}
// Raw returns the slice corresponding to the bytes in the given range. // Raw returns the slice corresponding to the bytes in the given range.
func (p *Parser) Raw(raw Range) []byte { func (p *Parser) Raw(raw Range) []byte {
return p.data[raw.Offset : raw.Offset+raw.Length] return p.data[raw.Offset : raw.Offset+raw.Length]
@@ -187,16 +198,16 @@ func (p *Parser) parseNewline(b []byte) ([]byte, error) {
} }
if b[0] == '\r' { if b[0] == '\r' {
_, rest, err := scanWindowsNewline(b, p.offsetOf(b)) _, rest, err := scanWindowsNewline(b)
return rest, err return rest, err
} }
return nil, NewParserError(b[0:1], p.offsetOf(b), "expected newline but got %#U", b[0]) return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0])
} }
func (p *Parser) parseComment(b []byte) (reference, []byte, error) { func (p *Parser) parseComment(b []byte) (reference, []byte, error) {
ref := invalidReference ref := invalidReference
data, rest, err := scanComment(b, p.offsetOf(b)) data, rest, err := scanComment(b)
if p.KeepComments && err == nil { if p.KeepComments && err == nil {
ref = p.builder.Push(Node{ ref = p.builder.Push(Node{
Kind: Comment, Kind: Comment,
@@ -280,12 +291,12 @@ func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) {
p.builder.AttachChild(ref, k) p.builder.AttachChild(ref, k)
b = p.parseWhitespace(b) b = p.parseWhitespace(b)
b, err = expect(']', b, p.offsetOf(b)) b, err = expect(']', b)
if err != nil { if err != nil {
return ref, nil, err return ref, nil, err
} }
b, err = expect(']', b, p.offsetOf(b)) b, err = expect(']', b)
return ref, b, err return ref, b, err
} }
@@ -310,16 +321,13 @@ func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) {
b = p.parseWhitespace(b) b = p.parseWhitespace(b)
b, err = expect(']', b, p.offsetOf(b)) b, err = expect(']', b)
return ref, b, err return ref, b, err
} }
func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) {
// keyval = key keyval-sep val // keyval = key keyval-sep val
// Track the start position for Raw range
startB := b
ref := p.builder.Push(Node{ ref := p.builder.Push(Node{
Kind: KeyValue, Kind: KeyValue,
}) })
@@ -334,10 +342,10 @@ func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) {
b = p.parseWhitespace(b) b = p.parseWhitespace(b)
if len(b) == 0 { if len(b) == 0 {
return invalidReference, nil, NewParserError(startB[:len(startB)-len(b)], p.offsetOf(startB), "expected = after a key, but the document ends there") return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there")
} }
b, err = expect('=', b, p.offsetOf(b)) b, err = expect('=', b)
if err != nil { if err != nil {
return invalidReference, nil, err return invalidReference, nil, err
} }
@@ -352,11 +360,6 @@ func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) {
p.builder.Chain(valRef, key) p.builder.Chain(valRef, key)
p.builder.AttachChild(ref, valRef) p.builder.AttachChild(ref, valRef)
// Set Raw to span the entire key-value expression.
// Access the node directly in the slice to avoid the write barrier
// that NodeAt's nodes-pointer setup would trigger.
p.builder.tree.nodes[ref].Raw = p.rangeOfToken(startB[:len(startB)-len(b)], b)
return ref, b, err return ref, b, err
} }
@@ -366,7 +369,7 @@ func (p *Parser) parseVal(b []byte) (reference, []byte, error) {
ref := invalidReference ref := invalidReference
if len(b) == 0 { if len(b) == 0 {
return ref, nil, NewParserError(b, p.offsetOf(b), "expected value, not eof") return ref, nil, NewParserError(b, "expected value, not eof")
} }
var err error var err error
@@ -411,7 +414,7 @@ func (p *Parser) parseVal(b []byte) (reference, []byte, error) {
return ref, b, err return ref, b, err
case 't': case 't':
if !scanFollowsTrue(b) { if !scanFollowsTrue(b) {
return ref, nil, NewParserError(atmost(b, 4), p.offsetOf(b), "expected 'true'") return ref, nil, NewParserError(atmost(b, 4), "expected 'true'")
} }
ref = p.builder.Push(Node{ ref = p.builder.Push(Node{
@@ -422,7 +425,7 @@ func (p *Parser) parseVal(b []byte) (reference, []byte, error) {
return ref, b[4:], nil return ref, b[4:], nil
case 'f': case 'f':
if !scanFollowsFalse(b) { if !scanFollowsFalse(b) {
return ref, nil, NewParserError(atmost(b, 5), p.offsetOf(b), "expected 'false'") return ref, nil, NewParserError(atmost(b, 5), "expected 'false'")
} }
ref = p.builder.Push(Node{ ref = p.builder.Push(Node{
@@ -449,7 +452,7 @@ func atmost(b []byte, n int) []byte {
} }
func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
v, rest, err := scanLiteralString(b, p.offsetOf(b)) v, rest, err := scanLiteralString(b)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -457,12 +460,14 @@ func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
return v, v[1 : len(v)-1], rest, nil return v, v[1 : len(v)-1], rest, nil
} }
//nolint:funlen,cyclop,dupl
func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) {
// inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
// inline-table-open = %x7B ws ; { // inline-table-open = %x7B ws ; {
// inline-table-close = ws %x7D ; } // inline-table-close = ws %x7D ; }
// inline-table-sep = ws %x2C ws ; , Comma // inline-table-sep = ws %x2C ws ; , Comma
// inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
tableStart := b
parent := p.builder.Push(Node{ parent := p.builder.Push(Node{
Kind: InlineTable, Kind: InlineTable,
Raw: p.rangeOfToken(b[:1], b[1:]), Raw: p.rangeOfToken(b[:1], b[1:]),
@@ -470,55 +475,87 @@ func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) {
first := true first := true
var child reference lastChild := invalidReference
addChild := func(ref reference) {
if lastChild == invalidReference {
p.builder.AttachChild(parent, ref)
} else {
p.builder.Chain(lastChild, ref)
}
lastChild = ref
}
b = b[1:] b = b[1:]
var err error var err error
for len(b) > 0 { for len(b) > 0 {
previousB := b var cref reference
b = p.parseWhitespace(b) cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
if err != nil {
return parent, nil, err
}
if cref != invalidReference {
addChild(cref)
}
if len(b) == 0 { if len(b) == 0 {
return parent, nil, NewParserError(previousB[:1], p.offsetOf(previousB), "inline table is incomplete") return parent, nil, NewParserError(tableStart[:1], "inline table is incomplete")
} }
if b[0] == '}' { if b[0] == '}' {
break break
} }
if !first { if b[0] == ',' {
b, err = expect(',', b, p.offsetOf(b)) if first {
return parent, nil, NewParserError(b[0:1], "inline table cannot start with comma")
}
b = b[1:]
cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
if err != nil { if err != nil {
return parent, nil, err return parent, nil, err
} }
b = p.parseWhitespace(b) if cref != invalidReference {
addChild(cref)
}
} else if !first {
return parent, nil, NewParserError(b[0:1], "inline table entries must be separated by commas")
}
// trailing comma: if '}' follows, stop
if len(b) > 0 && b[0] == '}' {
break
} }
var kv reference var kv reference
kv, b, err = p.parseKeyval(b) kv, b, err = p.parseKeyval(b)
if err != nil { if err != nil {
return parent, nil, err return parent, nil, err
} }
if first { addChild(kv)
p.builder.AttachChild(parent, kv)
} else { cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
p.builder.Chain(child, kv) if err != nil {
return parent, nil, err
}
if cref != invalidReference {
addChild(cref)
} }
child = kv
first = false first = false
} }
rest, err := expect('}', b, p.offsetOf(b)) rest, err := expect('}', b)
return parent, rest, err return parent, rest, err
} }
//nolint:funlen,cyclop //nolint:funlen,cyclop,dupl
func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
// array = array-open [ array-values ] ws-comment-newline array-close // array = array-open [ array-values ] ws-comment-newline array-close
// array-open = %x5B ; [ // array-open = %x5B ; [
@@ -562,7 +599,7 @@ func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
} }
if len(b) == 0 { if len(b) == 0 {
return parent, nil, NewParserError(arrayStart[:1], p.offsetOf(arrayStart), "array is incomplete") return parent, nil, NewParserError(arrayStart[:1], "array is incomplete")
} }
if b[0] == ']' { if b[0] == ']' {
@@ -571,7 +608,7 @@ func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
if b[0] == ',' { if b[0] == ',' {
if first { if first {
return parent, nil, NewParserError(b[0:1], p.offsetOf(b), "array cannot start with comma") return parent, nil, NewParserError(b[0:1], "array cannot start with comma")
} }
b = b[1:] b = b[1:]
@@ -583,7 +620,7 @@ func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
addChild(cref) addChild(cref)
} }
} else if !first { } else if !first {
return parent, nil, NewParserError(b[0:1], p.offsetOf(b), "array elements must be separated by commas") return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas")
} }
// TOML allows trailing commas in arrays. // TOML allows trailing commas in arrays.
@@ -610,7 +647,7 @@ func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
first = false first = false
} }
rest, err := expect(']', b, p.offsetOf(b)) rest, err := expect(']', b)
return parent, rest, err return parent, rest, err
} }
@@ -665,7 +702,7 @@ func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []b
} }
func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) {
token, rest, err := scanMultilineLiteralString(b, p.offsetOf(b)) token, rest, err := scanMultilineLiteralString(b)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -694,7 +731,7 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
// mlb-quotes = 1*2quotation-mark // mlb-quotes = 1*2quotation-mark
// mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
// mlb-escaped-nl = escape ws newline *( wschar / newline ) // mlb-escaped-nl = escape ws newline *( wschar / newline )
token, escaped, rest, err := scanMultilineBasicString(b, p.offsetOf(b)) token, escaped, rest, err := scanMultilineBasicString(b)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -711,15 +748,14 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
// fast path // fast path
startIdx := i startIdx := i
endIdx := len(token) - len(`"""`) endIdx := len(token) - len(`"""`)
tokenBase := p.offsetOf(token)
if !escaped { if !escaped {
str := token[startIdx:endIdx] str := token[startIdx:endIdx]
invalidIdx := characters.Utf8TomlValidAlreadyEscaped(str) highlight := characters.Utf8TomlValidAlreadyEscaped(str)
if invalidIdx < 0 { if len(highlight) == 0 {
return token, str, rest, nil return token, str, rest, nil
} }
return nil, nil, nil, NewParserError(str[invalidIdx:invalidIdx+1], tokenBase+startIdx+invalidIdx, "invalid UTF-8") return nil, nil, nil, NewParserError(highlight, "invalid UTF-8")
} }
var builder bytes.Buffer var builder bytes.Buffer
@@ -783,15 +819,22 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
builder.WriteByte('\t') builder.WriteByte('\t')
case 'e': case 'e':
builder.WriteByte(0x1B) builder.WriteByte(0x1B)
case 'x':
x, err := hexToRune(atmost(token[i+1:], 2), 2)
if err != nil {
return nil, nil, nil, err
}
builder.WriteRune(x)
i += 2
case 'u': case 'u':
x, err := hexToRune(atmost(token[i+1:], 4), tokenBase+i+1, 4) x, err := hexToRune(atmost(token[i+1:], 4), 4)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
builder.WriteRune(x) builder.WriteRune(x)
i += 4 i += 4
case 'U': case 'U':
x, err := hexToRune(atmost(token[i+1:], 8), tokenBase+i+1, 8) x, err := hexToRune(atmost(token[i+1:], 8), 8)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -799,13 +842,13 @@ func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
builder.WriteRune(x) builder.WriteRune(x)
i += 8 i += 8
default: default:
return nil, nil, nil, NewParserError(token[i:i+1], tokenBase+i, "invalid escaped character %#U", c) return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
} }
i++ i++
} else { } else {
size := characters.Utf8ValidNext(token[i:]) size := characters.Utf8ValidNext(token[i:])
if size == 0 { if size == 0 {
return nil, nil, nil, NewParserError(token[i:i+1], tokenBase+i, "invalid character %#U", c) return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
} }
builder.Write(token[i : i+size]) builder.Write(token[i : i+size])
i += size i += size
@@ -860,9 +903,12 @@ func (p *Parser) parseKey(b []byte) (reference, []byte, error) {
func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
if len(b) == 0 { if len(b) == 0 {
return nil, nil, nil, NewParserError(b, p.offsetOf(b), "expected key but found none") return nil, nil, nil, NewParserError(b, "expected key but found none")
} }
// simple-key = quoted-key / unquoted-key
// unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
// quoted-key = basic-string / literal-string
switch { switch {
case b[0] == '\'': case b[0] == '\'':
return p.parseLiteralString(b) return p.parseLiteralString(b)
@@ -872,7 +918,7 @@ func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
key, rest = scanUnquotedKey(b) key, rest = scanUnquotedKey(b)
return key, key, rest, nil return key, key, rest, nil
default: default:
return nil, nil, nil, NewParserError(b[0:1], p.offsetOf(b), "invalid character at start of key: %c", b[0]) return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0])
} }
} }
@@ -892,7 +938,7 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
// escape-seq-char =/ %x74 ; t tab U+0009 // escape-seq-char =/ %x74 ; t tab U+0009
// escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
// escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
token, escaped, rest, err := scanBasicString(b, p.offsetOf(b)) token, escaped, rest, err := scanBasicString(b)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -903,15 +949,13 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
// Fast path. If there is no escape sequence, the string should just be // Fast path. If there is no escape sequence, the string should just be
// an UTF-8 encoded string, which is the same as Go. In that case, // an UTF-8 encoded string, which is the same as Go. In that case,
// validate the string and return a direct reference to the buffer. // validate the string and return a direct reference to the buffer.
tokenBase := p.offsetOf(token)
if !escaped { if !escaped {
str := token[startIdx:endIdx] str := token[startIdx:endIdx]
invalidIdx := characters.Utf8TomlValidAlreadyEscaped(str) highlight := characters.Utf8TomlValidAlreadyEscaped(str)
if invalidIdx < 0 { if len(highlight) == 0 {
return token, str, rest, nil return token, str, rest, nil
} }
return nil, nil, nil, NewParserError(str[invalidIdx:invalidIdx+1], tokenBase+startIdx+invalidIdx, "invalid UTF-8") return nil, nil, nil, NewParserError(highlight, "invalid UTF-8")
} }
i := startIdx i := startIdx
@@ -941,8 +985,15 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
builder.WriteByte('\t') builder.WriteByte('\t')
case 'e': case 'e':
builder.WriteByte(0x1B) builder.WriteByte(0x1B)
case 'x':
x, err := hexToRune(token[i+1:len(token)-1], 2)
if err != nil {
return nil, nil, nil, err
}
builder.WriteRune(x)
i += 2
case 'u': case 'u':
x, err := hexToRune(token[i+1:len(token)-1], tokenBase+i+1, 4) x, err := hexToRune(token[i+1:len(token)-1], 4)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -950,7 +1001,7 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
builder.WriteRune(x) builder.WriteRune(x)
i += 4 i += 4
case 'U': case 'U':
x, err := hexToRune(token[i+1:len(token)-1], tokenBase+i+1, 8) x, err := hexToRune(token[i+1:len(token)-1], 8)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@@ -958,13 +1009,13 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
builder.WriteRune(x) builder.WriteRune(x)
i += 8 i += 8
default: default:
return nil, nil, nil, NewParserError(token[i:i+1], tokenBase+i, "invalid escaped character %#U", c) return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
} }
i++ i++
} else { } else {
size := characters.Utf8ValidNext(token[i:]) size := characters.Utf8ValidNext(token[i:])
if size == 0 { if size == 0 {
return nil, nil, nil, NewParserError(token[i:i+1], tokenBase+i, "invalid character %#U", c) return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
} }
builder.Write(token[i : i+size]) builder.Write(token[i : i+size])
i += size i += size
@@ -974,9 +1025,9 @@ func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
return token, builder.Bytes(), rest, nil return token, builder.Bytes(), rest, nil
} }
func hexToRune(b []byte, base int, length int) (rune, error) { func hexToRune(b []byte, length int) (rune, error) {
if len(b) < length { if len(b) < length {
return -1, NewParserError(b, base, "unicode point needs %d character, not %d", length, len(b)) return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b))
} }
b = b[:length] b = b[:length]
@@ -991,13 +1042,13 @@ func hexToRune(b []byte, base int, length int) (rune, error) {
case 'A' <= c && c <= 'F': case 'A' <= c && c <= 'F':
d = uint32(c - 'A' + 10) d = uint32(c - 'A' + 10)
default: default:
return -1, NewParserError(b[i:i+1], base+i, "non-hex character") return -1, NewParserError(b[i:i+1], "non-hex character")
} }
r = r*16 + d r = r*16 + d
} }
if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 {
return -1, NewParserError(b, base, "escape sequence is invalid Unicode code point") return -1, NewParserError(b, "escape sequence is invalid Unicode code point")
} }
return rune(r), nil return rune(r), nil
@@ -1017,7 +1068,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error)
switch b[0] { switch b[0] {
case 'i': case 'i':
if !scanFollowsInf(b) { if !scanFollowsInf(b) {
return invalidReference, nil, NewParserError(atmost(b, 3), p.offsetOf(b), "expected 'inf'") return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'")
} }
return p.builder.Push(Node{ return p.builder.Push(Node{
@@ -1027,7 +1078,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error)
}), b[3:], nil }), b[3:], nil
case 'n': case 'n':
if !scanFollowsNan(b) { if !scanFollowsNan(b) {
return invalidReference, nil, NewParserError(atmost(b, 3), p.offsetOf(b), "expected 'nan'") return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'")
} }
return p.builder.Push(Node{ return p.builder.Push(Node{
@@ -1186,7 +1237,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
}), b[i+3:], nil }), b[i+3:], nil
} }
return invalidReference, nil, NewParserError(b[i:i+1], p.offsetOf(b)+i, "unexpected character 'i' while scanning for a number") return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number")
} }
if c == 'n' { if c == 'n' {
@@ -1198,14 +1249,14 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
}), b[i+3:], nil }), b[i+3:], nil
} }
return invalidReference, nil, NewParserError(b[i:i+1], p.offsetOf(b)+i, "unexpected character 'n' while scanning for a number") return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number")
} }
break break
} }
if i == 0 { if i == 0 {
return invalidReference, b, NewParserError(b, p.offsetOf(b), "incomplete number") return invalidReference, b, NewParserError(b, "incomplete number")
} }
kind := Integer kind := Integer
@@ -1242,13 +1293,13 @@ func isValidBinaryRune(r byte) bool {
return r == '0' || r == '1' || r == '_' return r == '0' || r == '1' || r == '_'
} }
func expect(x byte, b []byte, base int) ([]byte, error) { func expect(x byte, b []byte) ([]byte, error) {
if len(b) == 0 { if len(b) == 0 {
return nil, NewParserError(b, base, "expected character %c but the document ended here", x) return nil, NewParserError(b, "expected character %c but the document ended here", x)
} }
if b[0] != x { if b[0] != x {
return nil, NewParserError(b[0:1], base, "expected character %c", x) return nil, NewParserError(b[0:1], "expected character %c", x)
} }
return b[1:], nil return b[1:], nil
+192 -97
View File
@@ -1,7 +1,6 @@
package unstable package unstable
import ( import (
"errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
@@ -332,6 +331,154 @@ func TestParser_AST(t *testing.T) {
}, },
}, },
}, },
{
desc: "multiline inline table",
input: "name = {\n first = \"Tom\",\n last = \"Preston-Werner\"\n}",
ast: astNode{
Kind: KeyValue,
Children: []astNode{
{
Kind: InlineTable,
Children: []astNode{
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Tom`)},
{Kind: Key, Data: []byte(`first`)},
},
},
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Preston-Werner`)},
{Kind: Key, Data: []byte(`last`)},
},
},
},
},
{
Kind: Key,
Data: []byte(`name`),
},
},
},
},
{
desc: "inline table with trailing comma",
input: `name = { first = "Tom", last = "Preston-Werner", }`,
ast: astNode{
Kind: KeyValue,
Children: []astNode{
{
Kind: InlineTable,
Children: []astNode{
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Tom`)},
{Kind: Key, Data: []byte(`first`)},
},
},
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Preston-Werner`)},
{Kind: Key, Data: []byte(`last`)},
},
},
},
},
{
Kind: Key,
Data: []byte(`name`),
},
},
},
},
{
desc: "empty inline table with newline",
input: "name = {\n}",
ast: astNode{
Kind: KeyValue,
Children: []astNode{
{
Kind: InlineTable,
Children: nil,
},
{
Kind: Key,
Data: []byte(`name`),
},
},
},
},
{
desc: "inline table with leading comma",
input: "name = { first = \"Tom\"\n, last = \"Werner\" }",
ast: astNode{
Kind: KeyValue,
Children: []astNode{
{
Kind: InlineTable,
Children: []astNode{
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Tom`)},
{Kind: Key, Data: []byte(`first`)},
},
},
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Werner`)},
{Kind: Key, Data: []byte(`last`)},
},
},
},
},
{
Kind: Key,
Data: []byte(`name`),
},
},
},
},
{
desc: "inline table with leading trailing comma",
input: "name = { first = \"Tom\"\n, }",
ast: astNode{
Kind: KeyValue,
Children: []astNode{
{
Kind: InlineTable,
Children: []astNode{
{
Kind: KeyValue,
Children: []astNode{
{Kind: String, Data: []byte(`Tom`)},
{Kind: Key, Data: []byte(`first`)},
},
},
},
},
{
Kind: Key,
Data: []byte(`name`),
},
},
},
},
{
desc: "inline table comma at start is error",
input: "name = { , first = \"Tom\" }",
err: true,
},
{
desc: "inline table double comma across newline is error",
input: "name = { first = \"Tom\",\n, last = \"Werner\" }",
err: true,
},
} }
for _, e := range examples { for _, e := range examples {
@@ -351,6 +498,44 @@ func TestParser_AST(t *testing.T) {
} }
} }
func TestParseInlineTable_CommentsWithKeepComments(t *testing.T) {
// Exercise comment reference handling inside parseInlineTable when
// KeepComments is true. This covers the addChild(cref) branches
// at the start of the loop, after comma, and after keyval.
examples := []struct {
desc string
input string
}{
{
desc: "comment at start of inline table",
input: "a = {\n# comment\nb = 1\n}",
},
{
desc: "comment after comma",
input: "a = {b = 1,\n# comment\nc = 2\n}",
},
{
desc: "comment after keyval",
input: "a = {b = 1 # comment\n, c = 2}",
},
{
desc: "comment only in inline table",
input: "a = {\n# just a comment\n}",
},
}
for _, e := range examples {
e := e
t.Run(e.desc, func(t *testing.T) {
p := Parser{KeepComments: true}
p.Reset([]byte(e.input))
p.NextExpression()
err := p.Error()
assert.NoError(t, err)
})
}
}
func BenchmarkParseBasicStringWithUnicode(b *testing.B) { func BenchmarkParseBasicStringWithUnicode(b *testing.B) {
p := &Parser{} p := &Parser{}
b.Run("4", func(b *testing.B) { b.Run("4", func(b *testing.B) {
@@ -540,7 +725,7 @@ key5 = [ # Next to start of inline array.
// --- // ---
// 6:1->6:22 (105->126) | Comment [# Above simple value.] // 6:1->6:22 (105->126) | Comment [# Above simple value.]
// --- // ---
// 7:1->7:14 (127->140) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 7:7->7:14 (133->140) | String [value] // 7:7->7:14 (133->140) | String [value]
// 7:1->7:4 (127->130) | Key [key] // 7:1->7:4 (127->130) | Key [key]
// 7:15->7:38 (141->164) | Comment [# Next to simple value.] // 7:15->7:38 (141->164) | Comment [# Next to simple value.]
@@ -553,12 +738,12 @@ key5 = [ # Next to start of inline array.
// --- // ---
// 14:1->14:22 (252->273) | Comment [# Above inline table.] // 14:1->14:22 (252->273) | Comment [# Above inline table.]
// --- // ---
// 15:1->15:50 (274->323) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 15:8->15:9 (281->282) | InlineTable [] // 15:8->15:9 (281->282) | InlineTable []
// 15:10->15:23 (283->296) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 15:18->15:23 (291->296) | String [Tom] // 15:18->15:23 (291->296) | String [Tom]
// 15:10->15:15 (283->288) | Key [first] // 15:10->15:15 (283->288) | Key [first]
// 15:25->15:48 (298->321) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 15:32->15:48 (305->321) | String [Preston-Werner] // 15:32->15:48 (305->321) | String [Preston-Werner]
// 15:25->15:29 (298->302) | Key [last] // 15:25->15:29 (298->302) | Key [last]
// 15:1->15:5 (274->278) | Key [name] // 15:1->15:5 (274->278) | Key [name]
@@ -568,7 +753,7 @@ key5 = [ # Next to start of inline array.
// --- // ---
// 18:1->18:15 (371->385) | Comment [# Above array.] // 18:1->18:15 (371->385) | Comment [# Above array.]
// --- // ---
// 19:1->19:20 (386->405) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 1:1->1:1 (0->0) | Array [] // 1:1->1:1 (0->0) | Array []
// 19:11->19:12 (396->397) | Integer [1] // 19:11->19:12 (396->397) | Integer [1]
// 19:14->19:15 (399->400) | Integer [2] // 19:14->19:15 (399->400) | Integer [2]
@@ -580,7 +765,7 @@ key5 = [ # Next to start of inline array.
// --- // ---
// 22:1->22:26 (448->473) | Comment [# Above multi-line array.] // 22:1->22:26 (448->473) | Comment [# Above multi-line array.]
// --- // ---
// 23:1->31:2 (474->694) | KeyValue [] // 1:1->1:1 (0->0) | KeyValue []
// 1:1->1:1 (0->0) | Array [] // 1:1->1:1 (0->0) | Array []
// 23:10->23:42 (483->515) | Comment [# Next to start of inline array.] // 23:10->23:42 (483->515) | Comment [# Next to start of inline array.]
// 24:3->24:38 (518->553) | Comment [# Second line before array content.] // 24:3->24:38 (518->553) | Comment [# Second line before array content.]
@@ -674,96 +859,6 @@ key3 = "value3"
assert.Equal(t, []string{"key1", "key2", "key3"}, keys) assert.Equal(t, []string{"key1", "key2", "key3"}, keys)
} }
func TestErrorOffsetAfterComment(t *testing.T) {
input := []byte("# comment\n= \"value\"")
p := Parser{}
p.Reset(input)
for p.NextExpression() {
}
err := p.Error()
if err == nil {
t.Fatal("expected an error")
}
var perr *ParserError
if !errors.As(err, &perr) {
t.Fatalf("expected ParserError, got %T", err)
}
if perr.Offset != 10 {
t.Errorf("offset: got %d, want 10", perr.Offset)
}
shape := p.Shape(Range{Offset: uint32(perr.Offset), Length: uint32(len(perr.Highlight))})
if shape.Start.Line != 2 || shape.Start.Column != 1 {
t.Errorf("position: got %d:%d, want 2:1", shape.Start.Line, shape.Start.Column)
}
}
func TestErrorHighlightPositions(t *testing.T) {
examples := []struct {
desc string
input string
wantLine int
wantColumn int
}{
{
desc: "invalid key start after comment",
input: "# comment\n= \"value\"",
wantLine: 2,
wantColumn: 1,
},
{
desc: "invalid key start on first line",
input: "= \"value\"",
wantLine: 1,
wantColumn: 1,
},
{
desc: "invalid key after multiple comments",
input: "# comment 1\n# comment 2\n= \"value\"",
wantLine: 3,
wantColumn: 1,
},
{
desc: "invalid key after valid key-value",
input: "a = 1\n= \"value\"",
wantLine: 2,
wantColumn: 1,
},
{
desc: "invalid key after whitespace on line",
input: "a = 1\n = \"value\"",
wantLine: 2,
wantColumn: 3,
},
}
for _, e := range examples {
t.Run(e.desc, func(t *testing.T) {
p := Parser{}
p.Reset([]byte(e.input))
for p.NextExpression() {
}
err := p.Error()
if err == nil {
t.Fatal("expected an error")
}
var perr *ParserError
if !errors.As(err, &perr) {
t.Fatalf("expected ParserError, got %T", err)
}
shape := p.Shape(Range{Offset: uint32(perr.Offset), Length: uint32(len(perr.Highlight))})
if shape.Start.Line != e.wantLine {
t.Errorf("line: got %d, want %d", shape.Start.Line, e.wantLine)
}
if shape.Start.Column != e.wantColumn {
t.Errorf("column: got %d, want %d", shape.Start.Column, e.wantColumn)
}
})
}
}
func ExampleParser() { func ExampleParser() {
doc := ` doc := `
hello = "world" hello = "world"
+72 -27
View File
@@ -47,31 +47,48 @@ func isUnquotedKeyChar(r byte) bool {
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_'
} }
func scanLiteralString(b []byte, base int) ([]byte, []byte, error) { func scanLiteralString(b []byte) ([]byte, []byte, error) {
// literal-string = apostrophe *literal-char apostrophe
// apostrophe = %x27 ; ' apostrophe
// literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
for i := 1; i < len(b); { for i := 1; i < len(b); {
switch b[i] { switch b[i] {
case '\'': case '\'':
return b[:i+1], b[i+1:], nil return b[:i+1], b[i+1:], nil
case '\n', '\r': case '\n', '\r':
return nil, nil, NewParserError(b[i:i+1], base+i, "literal strings cannot have new lines") return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines")
} }
size := characters.Utf8ValidNext(b[i:]) size := characters.Utf8ValidNext(b[i:])
if size == 0 { if size == 0 {
return nil, nil, NewParserError(b[i:i+1], base+i, "invalid character") return nil, nil, NewParserError(b[i:i+1], "invalid character")
} }
i += size i += size
} }
return nil, nil, NewParserError(b[len(b):], base+len(b), "unterminated literal string") return nil, nil, NewParserError(b[len(b):], "unterminated literal string")
} }
func scanMultilineLiteralString(b []byte, base int) ([]byte, []byte, error) { func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
// ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
// ml-literal-string-delim
// ml-literal-string-delim = 3apostrophe
// ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
//
// mll-content = mll-char / newline
// mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
// mll-quotes = 1*2apostrophe
for i := 3; i < len(b); { for i := 3; i < len(b); {
switch b[i] { switch b[i] {
case '\'': case '\'':
if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
i += 3 i += 3
// At that point we found 3 apostrophe, and i is the
// index of the byte after the third one. The scanner
// needs to be eager, because there can be an extra 2
// apostrophe that can be accepted at the end of the
// string.
if i >= len(b) || b[i] != '\'' { if i >= len(b) || b[i] != '\'' {
return b[:i], b[i:], nil return b[:i], b[i:], nil
} }
@@ -83,39 +100,39 @@ func scanMultilineLiteralString(b []byte, base int) ([]byte, []byte, error) {
i++ i++
if i < len(b) && b[i] == '\'' { if i < len(b) && b[i] == '\'' {
return nil, nil, NewParserError(b[i-3:i+1], base+i-3, "''' not allowed in multiline literal string") return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string")
} }
return b[:i], b[i:], nil return b[:i], b[i:], nil
} }
case '\r': case '\r':
if len(b) < i+2 { if len(b) < i+2 {
return nil, nil, NewParserError(b[len(b):], base+len(b), `need a \n after \r`) return nil, nil, NewParserError(b[len(b):], `need a \n after \r`)
} }
if b[i+1] != '\n' { if b[i+1] != '\n' {
return nil, nil, NewParserError(b[i:i+2], base+i, `need a \n after \r`) return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`)
} }
i += 2 i += 2 // skip the \n
continue continue
} }
size := characters.Utf8ValidNext(b[i:]) size := characters.Utf8ValidNext(b[i:])
if size == 0 { if size == 0 {
return nil, nil, NewParserError(b[i:i+1], base+i, "invalid character") return nil, nil, NewParserError(b[i:i+1], "invalid character")
} }
i += size i += size
} }
return nil, nil, NewParserError(b[len(b):], base+len(b), `multiline literal string not terminated by '''`) return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`)
} }
func scanWindowsNewline(b []byte, base int) ([]byte, []byte, error) { func scanWindowsNewline(b []byte) ([]byte, []byte, error) {
const lenCRLF = 2 const lenCRLF = 2
if len(b) < lenCRLF { if len(b) < lenCRLF {
return nil, nil, NewParserError(b, base, "windows new line expected") return nil, nil, NewParserError(b, "windows new line expected")
} }
if b[1] != '\n' { if b[1] != '\n' {
return nil, nil, NewParserError(b, base, `windows new line should be \r\n`) return nil, nil, NewParserError(b, `windows new line should be \r\n`)
} }
return b[:lenCRLF], b[lenCRLF:], nil return b[:lenCRLF], b[lenCRLF:], nil
@@ -134,7 +151,13 @@ func scanWhitespace(b []byte) ([]byte, []byte) {
return b, b[len(b):] return b, b[len(b):]
} }
func scanComment(b []byte, base int) ([]byte, []byte, error) { func scanComment(b []byte) ([]byte, []byte, error) {
// comment-start-symbol = %x23 ; #
// non-ascii = %x80-D7FF / %xE000-10FFFF
// non-eol = %x09 / %x20-7F / non-ascii
//
// comment = comment-start-symbol *non-eol
for i := 1; i < len(b); { for i := 1; i < len(b); {
if b[i] == '\n' { if b[i] == '\n' {
return b[:i], b[i:], nil return b[:i], b[i:], nil
@@ -143,11 +166,11 @@ func scanComment(b []byte, base int) ([]byte, []byte, error) {
if i+1 < len(b) && b[i+1] == '\n' { if i+1 < len(b) && b[i+1] == '\n' {
return b[:i+1], b[i+1:], nil return b[:i+1], b[i+1:], nil
} }
return nil, nil, NewParserError(b[i:i+1], base+i, "invalid character in comment") return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
} }
size := characters.Utf8ValidNext(b[i:]) size := characters.Utf8ValidNext(b[i:])
if size == 0 { if size == 0 {
return nil, nil, NewParserError(b[i:i+1], base+i, "invalid character in comment") return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
} }
i += size i += size
@@ -156,7 +179,12 @@ func scanComment(b []byte, base int) ([]byte, []byte, error) {
return b, b[len(b):], nil return b, b[len(b):], nil
} }
func scanBasicString(b []byte, base int) ([]byte, bool, []byte, error) { func scanBasicString(b []byte) ([]byte, bool, []byte, error) {
// basic-string = quotation-mark *basic-char quotation-mark
// quotation-mark = %x22 ; "
// basic-char = basic-unescaped / escaped
// basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
// escaped = escape escape-seq-char
escaped := false escaped := false
i := 1 i := 1
@@ -165,20 +193,31 @@ func scanBasicString(b []byte, base int) ([]byte, bool, []byte, error) {
case '"': case '"':
return b[:i+1], escaped, b[i+1:], nil return b[:i+1], escaped, b[i+1:], nil
case '\n', '\r': case '\n', '\r':
return nil, escaped, nil, NewParserError(b[i:i+1], base+i, "basic strings cannot have new lines") return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines")
case '\\': case '\\':
if len(b) < i+2 { if len(b) < i+2 {
return nil, escaped, nil, NewParserError(b[i:i+1], base+i, "need a character after \\") return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\")
} }
escaped = true escaped = true
i++ // skip the next character i++ // skip the next character
} }
} }
return nil, escaped, nil, NewParserError(b[len(b):], base+len(b), `basic string not terminated by "`) return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`)
} }
func scanMultilineBasicString(b []byte, base int) ([]byte, bool, []byte, error) { func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
// ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
// ml-basic-string-delim
// ml-basic-string-delim = 3quotation-mark
// ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
//
// mlb-content = mlb-char / newline / mlb-escaped-nl
// mlb-char = mlb-unescaped / escaped
// mlb-quotes = 1*2quotation-mark
// mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
// mlb-escaped-nl = escape ws newline *( wschar / newline )
escaped := false escaped := false
i := 3 i := 3
@@ -188,6 +227,12 @@ func scanMultilineBasicString(b []byte, base int) ([]byte, bool, []byte, error)
if scanFollowsMultilineBasicStringDelimiter(b[i:]) { if scanFollowsMultilineBasicStringDelimiter(b[i:]) {
i += 3 i += 3
// At that point we found 3 apostrophe, and i is the
// index of the byte after the third one. The scanner
// needs to be eager, because there can be an extra 2
// apostrophe that can be accepted at the end of the
// string.
if i >= len(b) || b[i] != '"' { if i >= len(b) || b[i] != '"' {
return b[:i], escaped, b[i:], nil return b[:i], escaped, b[i:], nil
} }
@@ -199,27 +244,27 @@ func scanMultilineBasicString(b []byte, base int) ([]byte, bool, []byte, error)
i++ i++
if i < len(b) && b[i] == '"' { if i < len(b) && b[i] == '"' {
return nil, escaped, nil, NewParserError(b[i-3:i+1], base+i-3, `""" not allowed in multiline basic string`) return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`)
} }
return b[:i], escaped, b[i:], nil return b[:i], escaped, b[i:], nil
} }
case '\\': case '\\':
if len(b) < i+2 { if len(b) < i+2 {
return nil, escaped, nil, NewParserError(b[len(b):], base+len(b), "need a character after \\") return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\")
} }
escaped = true escaped = true
i++ // skip the next character i++ // skip the next character
case '\r': case '\r':
if len(b) < i+2 { if len(b) < i+2 {
return nil, escaped, nil, NewParserError(b[len(b):], base+len(b), `need a \n after \r`) return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`)
} }
if b[i+1] != '\n' { if b[i+1] != '\n' {
return nil, escaped, nil, NewParserError(b[i:i+2], base+i, `need a \n after \r`) return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`)
} }
i++ // skip the \n i++ // skip the \n
} }
} }
return nil, escaped, nil, NewParserError(b[len(b):], base+len(b), `multiline basic string not terminated by """`) return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`)
} }
+3 -28
View File
@@ -1,32 +1,7 @@
package unstable package unstable
// Unmarshaler is implemented by types that can unmarshal a TOML // The Unmarshaler interface may be implemented by types to customize their
// description of themselves. The input is a valid TOML document // behavior when being unmarshaled from a TOML document.
// containing the relevant portion of the parsed document.
//
// For tables (including split tables defined in multiple places),
// the data contains the raw key-value bytes from the original document
// with adjusted table headers to be relative to the unmarshaling target.
type Unmarshaler interface { type Unmarshaler interface {
UnmarshalTOML(data []byte) error UnmarshalTOML(value *Node) error
}
// RawMessage is a raw encoded TOML value. It implements Unmarshaler
// and can be used to delay TOML decoding or capture raw content.
//
// Example usage:
//
// type Config struct {
// Plugin RawMessage `toml:"plugin"`
// }
//
// var cfg Config
// toml.NewDecoder(r).EnableUnmarshalerInterface().Decode(&cfg)
// // cfg.Plugin now contains the raw TOML bytes for [plugin]
type RawMessage []byte
// UnmarshalTOML implements Unmarshaler.
func (m *RawMessage) UnmarshalTOML(data []byte) error {
*m = append((*m)[0:0], data...)
return nil
} }