Compare commits

..

6 Commits

Author SHA1 Message Date
Thomas Pelletier dc72d75f3e Keep separate fn for []interface{} unmarshal 2021-11-13 19:20:20 -05:00
Thomas Pelletier f77775b59e Use less reflection when making slices
```
name                               old time/op    new time/op    delta
UnmarshalDataset/config-2            24.9ms ± 0%    24.6ms ± 0%  -1.09%  (p=0.029 n=4+4)
UnmarshalDataset/canada-2            61.7ms ± 1%    62.1ms ± 3%    ~     (p=1.000 n=5+5)
UnmarshalDataset/citm_catalog-2      24.7ms ± 1%    24.2ms ± 0%  -2.30%  (p=0.008 n=5+5)
UnmarshalDataset/twitter-2           10.9ms ± 2%    10.7ms ± 1%  -1.46%  (p=0.008 n=5+5)
UnmarshalDataset/code-2               108ms ± 0%     106ms ± 0%  -1.91%  (p=0.008 n=5+5)
UnmarshalDataset/example-2            176µs ± 0%     173µs ± 0%  -1.83%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/struct-2     586ns ± 1%     587ns ± 0%    ~     (p=0.690 n=5+5)
Unmarshal/SimpleDocument/map-2        876ns ± 0%     872ns ± 0%    ~     (p=0.095 n=5+5)
Unmarshal/ReferenceFile/struct-2     49.5µs ± 0%    49.5µs ± 0%    ~     (p=0.222 n=5+5)
Unmarshal/ReferenceFile/map-2        79.6µs ± 0%    79.1µs ± 0%  -0.62%  (p=0.008 n=5+5)
Unmarshal/HugoFrontMatter-2          13.7µs ± 0%    13.5µs ± 0%  -0.91%  (p=0.008 n=5+5)

name                               old speed      new speed      delta
UnmarshalDataset/config-2          42.2MB/s ± 0%  42.7MB/s ± 0%  +1.10%  (p=0.029 n=4+4)
UnmarshalDataset/canada-2          35.7MB/s ± 1%  35.5MB/s ± 3%    ~     (p=1.000 n=5+5)
UnmarshalDataset/citm_catalog-2    22.6MB/s ± 1%  23.1MB/s ± 0%  +2.36%  (p=0.008 n=5+5)
UnmarshalDataset/twitter-2         40.6MB/s ± 2%  41.2MB/s ± 1%  +1.47%  (p=0.008 n=5+5)
UnmarshalDataset/code-2            24.9MB/s ± 0%  25.4MB/s ± 0%  +1.95%  (p=0.008 n=5+5)
UnmarshalDataset/example-2         46.0MB/s ± 0%  46.9MB/s ± 0%  +1.86%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/struct-2  18.8MB/s ± 1%  18.7MB/s ± 0%    ~     (p=0.651 n=5+5)
Unmarshal/SimpleDocument/map-2     12.6MB/s ± 0%  12.6MB/s ± 0%    ~     (p=0.087 n=5+5)
Unmarshal/ReferenceFile/struct-2    106MB/s ± 0%   106MB/s ± 0%    ~     (p=0.222 n=5+5)
Unmarshal/ReferenceFile/map-2      65.8MB/s ± 0%  66.2MB/s ± 0%  +0.63%  (p=0.008 n=5+5)
Unmarshal/HugoFrontMatter-2        40.0MB/s ± 0%  40.3MB/s ± 0%  +0.92%  (p=0.008 n=5+5)

name                               old alloc/op   new alloc/op   delta
UnmarshalDataset/config-2            5.85MB ± 0%    5.85MB ± 0%    ~     (p=1.000 n=5+5)
UnmarshalDataset/canada-2            75.2MB ± 0%    75.2MB ± 0%    ~     (p=1.000 n=5+5)
UnmarshalDataset/citm_catalog-2      35.0MB ± 0%    35.0MB ± 0%    ~     (p=0.841 n=5+5)
UnmarshalDataset/twitter-2           13.5MB ± 0%    13.5MB ± 0%    ~     (p=0.548 n=5+5)
UnmarshalDataset/code-2              22.0MB ± 0%    22.0MB ± 0%    ~     (p=0.738 n=5+5)
UnmarshalDataset/example-2            203kB ± 0%     203kB ± 0%    ~     (p=0.714 n=5+5)
Unmarshal/SimpleDocument/struct-2      709B ± 0%      709B ± 0%    ~     (all equal)
Unmarshal/SimpleDocument/map-2       1.08kB ± 0%    1.08kB ± 0%    ~     (all equal)
Unmarshal/ReferenceFile/struct-2     19.7kB ± 0%    19.7kB ± 0%    ~     (all equal)
Unmarshal/ReferenceFile/map-2        37.0kB ± 0%    37.0kB ± 0%    ~     (p=0.333 n=4+5)
Unmarshal/HugoFrontMatter-2          7.22kB ± 0%    7.22kB ± 0%    ~     (all equal)

name                               old allocs/op  new allocs/op  delta
UnmarshalDataset/config-2              230k ± 0%      230k ± 0%    ~     (p=0.556 n=4+5)
UnmarshalDataset/canada-2              391k ± 0%      391k ± 0%    ~     (all equal)
UnmarshalDataset/citm_catalog-2        158k ± 0%      158k ± 0%    ~     (p=1.000 n=4+5)
UnmarshalDataset/twitter-2            54.7k ± 0%     54.7k ± 0%    ~     (p=1.000 n=4+5)
UnmarshalDataset/code-2               1.05M ± 0%     1.05M ± 0%    ~     (all equal)
UnmarshalDataset/example-2            1.28k ± 0%     1.28k ± 0%    ~     (all equal)
Unmarshal/SimpleDocument/struct-2      8.00 ± 0%      8.00 ± 0%    ~     (all equal)
Unmarshal/SimpleDocument/map-2         13.0 ± 0%      13.0 ± 0%    ~     (all equal)
Unmarshal/ReferenceFile/struct-2        123 ± 0%       123 ± 0%    ~     (all equal)
Unmarshal/ReferenceFile/map-2           590 ± 0%       590 ± 0%    ~     (all equal)
Unmarshal/HugoFrontMatter-2             130 ± 0%       130 ± 0%    ~     (all equal)
```
2021-11-13 19:20:20 -05:00
Thomas Pelletier b52f6c9823 Remove some allocs for slices in interfaces
```
name                               old time/op    new time/op    delta
UnmarshalDataset/config-2            24.9ms ± 1%    24.9ms ± 0%     ~     (p=0.413 n=5+4)
UnmarshalDataset/canada-2            66.1ms ± 0%    61.7ms ± 1%   -6.63%  (p=0.008 n=5+5)
UnmarshalDataset/citm_catalog-2      25.3ms ± 5%    24.7ms ± 1%   -2.09%  (p=0.032 n=5+5)
UnmarshalDataset/twitter-2           10.9ms ± 2%    10.9ms ± 2%     ~     (p=1.000 n=5+5)
UnmarshalDataset/code-2               108ms ± 0%     108ms ± 0%     ~     (p=0.095 n=5+5)
UnmarshalDataset/example-2            177µs ± 2%     176µs ± 0%     ~     (p=0.841 n=5+5)
Unmarshal/SimpleDocument/struct-2     579ns ± 0%     586ns ± 1%   +1.30%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/map-2        875ns ± 1%     876ns ± 0%     ~     (p=0.548 n=5+5)
Unmarshal/ReferenceFile/struct-2     49.7µs ± 1%    49.5µs ± 0%     ~     (p=0.095 n=5+5)
Unmarshal/ReferenceFile/map-2        80.4µs ± 0%    79.6µs ± 0%   -0.99%  (p=0.008 n=5+5)
Unmarshal/HugoFrontMatter-2          13.9µs ± 0%    13.7µs ± 0%   -1.70%  (p=0.008 n=5+5)

name                               old speed      new speed      delta
UnmarshalDataset/config-2          42.1MB/s ± 1%  42.2MB/s ± 0%     ~     (p=0.381 n=5+4)
UnmarshalDataset/canada-2          33.3MB/s ± 0%  35.7MB/s ± 1%   +7.11%  (p=0.008 n=5+5)
UnmarshalDataset/citm_catalog-2    22.1MB/s ± 5%  22.6MB/s ± 1%   +2.08%  (p=0.032 n=5+5)
UnmarshalDataset/twitter-2         40.7MB/s ± 2%  40.6MB/s ± 2%     ~     (p=1.000 n=5+5)
UnmarshalDataset/code-2            24.8MB/s ± 0%  24.9MB/s ± 0%     ~     (p=0.103 n=5+5)
UnmarshalDataset/example-2         45.8MB/s ± 2%  46.0MB/s ± 0%     ~     (p=0.841 n=5+5)
Unmarshal/SimpleDocument/struct-2  19.0MB/s ± 0%  18.8MB/s ± 1%   -1.26%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/map-2     12.6MB/s ± 1%  12.6MB/s ± 0%     ~     (p=0.508 n=5+5)
Unmarshal/ReferenceFile/struct-2    105MB/s ± 1%   106MB/s ± 0%     ~     (p=0.095 n=5+5)
Unmarshal/ReferenceFile/map-2      65.2MB/s ± 0%  65.8MB/s ± 0%   +1.00%  (p=0.008 n=5+5)
Unmarshal/HugoFrontMatter-2        39.3MB/s ± 0%  40.0MB/s ± 0%   +1.73%  (p=0.008 n=5+5)

name                               old alloc/op   new alloc/op   delta
UnmarshalDataset/config-2            5.85MB ± 0%    5.85MB ± 0%   -0.00%  (p=0.008 n=5+5)
UnmarshalDataset/canada-2            76.6MB ± 0%    75.2MB ± 0%   -1.76%  (p=0.016 n=4+5)
UnmarshalDataset/citm_catalog-2      35.3MB ± 0%    35.0MB ± 0%   -0.71%  (p=0.008 n=5+5)
UnmarshalDataset/twitter-2           13.5MB ± 0%    13.5MB ± 0%   -0.19%  (p=0.016 n=4+5)
UnmarshalDataset/code-2              22.3MB ± 0%    22.0MB ± 0%   -1.31%  (p=0.008 n=5+5)
UnmarshalDataset/example-2            204kB ± 0%     203kB ± 0%   -0.34%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/struct-2      709B ± 0%      709B ± 0%     ~     (all equal)
Unmarshal/SimpleDocument/map-2       1.08kB ± 0%    1.08kB ± 0%     ~     (all equal)
Unmarshal/ReferenceFile/struct-2     19.8kB ± 0%    19.7kB ± 0%   -0.24%  (p=0.008 n=5+5)
Unmarshal/ReferenceFile/map-2        37.3kB ± 0%    37.0kB ± 0%   -0.64%  (p=0.029 n=4+4)
Unmarshal/HugoFrontMatter-2          7.26kB ± 0%    7.22kB ± 0%   -0.66%  (p=0.008 n=5+5)

name                               old allocs/op  new allocs/op  delta
UnmarshalDataset/config-2              230k ± 0%      230k ± 0%   -0.00%  (p=0.000 n=5+4)
UnmarshalDataset/canada-2              447k ± 0%      391k ± 0%  -12.53%  (p=0.008 n=5+5)
UnmarshalDataset/citm_catalog-2        169k ± 0%      158k ± 0%   -6.20%  (p=0.029 n=4+4)
UnmarshalDataset/twitter-2            55.8k ± 0%     54.7k ± 0%   -1.88%  (p=0.029 n=4+4)
UnmarshalDataset/code-2               1.06M ± 0%     1.05M ± 0%   -1.14%  (p=0.008 n=5+5)
UnmarshalDataset/example-2            1.31k ± 0%     1.28k ± 0%   -2.21%  (p=0.008 n=5+5)
Unmarshal/SimpleDocument/struct-2      8.00 ± 0%      8.00 ± 0%     ~     (all equal)
Unmarshal/SimpleDocument/map-2         13.0 ± 0%      13.0 ± 0%     ~     (all equal)
Unmarshal/ReferenceFile/struct-2        125 ± 0%       123 ± 0%   -1.60%  (p=0.008 n=5+5)
Unmarshal/ReferenceFile/map-2           600 ± 0%       590 ± 0%   -1.67%  (p=0.008 n=5+5)
Unmarshal/HugoFrontMatter-2             132 ± 0%       130 ± 0%   -1.52%  (p=0.008 n=5+5)
```
2021-11-13 19:20:20 -05:00
Thomas Pelletier 12244064bb Use global cache to unmarshal all slice types 2021-11-13 19:20:20 -05:00
Thomas Pelletier 6430ee0bfa Generic slice unmarshal fn 2021-11-13 19:20:20 -05:00
Thomas Pelletier cf530eba46 Specialize array unmarshal into []interface{} 2021-11-13 19:20:19 -05:00
62 changed files with 795 additions and 3064 deletions
-1
View File
@@ -1,4 +1,3 @@
* text=auto * text=auto
benchmark/benchmark.toml text eol=lf benchmark/benchmark.toml text eol=lf
testdata/** text eol=lf
-1
View File
@@ -2,7 +2,6 @@ changelog:
exclude: exclude:
labels: labels:
- build - build
- testing
categories: categories:
- title: What's new - title: What's new
labels: labels:
+4 -4
View File
@@ -35,11 +35,11 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v3 uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v2 uses: github/codeql-action/init@v1
with: with:
languages: ${{ matrix.language }} languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file. # If you wish to specify custom queries, you can do so here or in a config file.
@@ -50,7 +50,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below) # If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild - name: Autobuild
uses: github/codeql-action/autobuild@v2 uses: github/codeql-action/autobuild@v1
# ️ Command-line programs to run using the OS shell. # ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl # 📚 https://git.io/JvXDl
@@ -64,4 +64,4 @@ jobs:
# make release # make release
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2 uses: github/codeql-action/analyze@v1
+2 -2
View File
@@ -9,12 +9,12 @@ jobs:
runs-on: "ubuntu-latest" runs-on: "ubuntu-latest"
name: report name: report
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@master
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup go - name: Setup go
uses: actions/setup-go@master uses: actions/setup-go@master
with: with:
go-version: 1.19 go-version: 1.16
- name: Run tests with coverage - name: Run tests with coverage
run: ./ci.sh coverage -d "${GITHUB_BASE_REF-HEAD}" run: ./ci.sh coverage -d "${GITHUB_BASE_REF-HEAD}"
-39
View File
@@ -1,39 +0,0 @@
name: release
on:
push:
tags:
- "v2.*"
workflow_call:
inputs:
args:
description: "Extra arguments to pass goreleaser"
default: ""
required: false
type: string
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v3
with:
distribution: goreleaser
version: latest
args: release ${{ inputs.args }} --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+2 -9
View File
@@ -12,21 +12,14 @@ jobs:
strategy: strategy:
matrix: matrix:
os: [ 'ubuntu-latest', 'windows-latest', 'macos-latest'] os: [ 'ubuntu-latest', 'windows-latest', 'macos-latest']
go: [ '1.18', '1.19' ] go: [ '1.16', '1.17' ]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
name: ${{ matrix.go }}/${{ matrix.os }} name: ${{ matrix.go }}/${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@master
with:
fetch-depth: 0
- name: Setup go ${{ matrix.go }} - name: Setup go ${{ matrix.go }}
uses: actions/setup-go@master uses: actions/setup-go@master
with: with:
go-version: ${{ matrix.go }} go-version: ${{ matrix.go }}
- name: Run unit tests - name: Run unit tests
run: go test -race ./... run: go test -race ./...
release-check:
if: ${{ github.ref != 'refs/heads/v2' }}
uses: pelletier/go-toml/.github/workflows/release.yml@v2
with:
args: --snapshot
-1
View File
@@ -3,4 +3,3 @@ fuzz/
cmd/tomll/tomll cmd/tomll/tomll
cmd/tomljson/tomljson cmd/tomljson/tomljson
cmd/tomltestgen/tomltestgen cmd/tomltestgen/tomltestgen
dist
-123
View File
@@ -1,123 +0,0 @@
before:
hooks:
- go mod tidy
- go fmt ./...
- go test ./...
builds:
- id: tomll
main: ./cmd/tomll
binary: tomll
env:
- CGO_ENABLED=0
flags:
- -trimpath
ldflags:
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
mod_timestamp: '{{ .CommitTimestamp }}'
targets:
- linux_amd64
- linux_arm64
- linux_arm
- windows_amd64
- windows_arm64
- windows_arm
- darwin_amd64
- darwin_arm64
- id: tomljson
main: ./cmd/tomljson
binary: tomljson
env:
- CGO_ENABLED=0
flags:
- -trimpath
ldflags:
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
mod_timestamp: '{{ .CommitTimestamp }}'
targets:
- linux_amd64
- linux_arm64
- linux_arm
- windows_amd64
- windows_arm64
- windows_arm
- darwin_amd64
- darwin_arm64
- id: jsontoml
main: ./cmd/jsontoml
binary: jsontoml
env:
- CGO_ENABLED=0
flags:
- -trimpath
ldflags:
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
mod_timestamp: '{{ .CommitTimestamp }}'
targets:
- linux_amd64
- linux_arm64
- linux_arm
- windows_amd64
- windows_arm64
- windows_arm
- darwin_amd64
- darwin_arm64
universal_binaries:
- id: tomll
replace: true
name_template: tomll
- id: tomljson
replace: true
name_template: tomljson
- id: jsontoml
replace: true
name_template: jsontoml
archives:
- id: jsontoml
format: tar.xz
builds:
- jsontoml
files:
- none*
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
- id: tomljson
format: tar.xz
builds:
- tomljson
files:
- none*
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
- id: tomll
format: tar.xz
builds:
- tomll
files:
- none*
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
dockers:
- id: tools
goos: linux
goarch: amd64
ids:
- jsontoml
- tomljson
- tomll
image_templates:
- "ghcr.io/pelletier/go-toml:latest"
- "ghcr.io/pelletier/go-toml:{{ .Tag }}"
- "ghcr.io/pelletier/go-toml:v{{ .Major }}"
skip_push: false
checksum:
name_template: 'sha256sums.txt'
snapshot:
name_template: "{{ incpatch .Version }}-next"
release:
github:
owner: pelletier
name: go-toml
draft: true
prerelease: auto
mode: replace
changelog:
use: github-native
announce:
skip: true
+9 -23
View File
@@ -155,8 +155,6 @@ Checklist:
- Does not introduce backward-incompatible changes (unless discussed). - Does not introduce backward-incompatible changes (unless discussed).
- Has relevant doc changes. - Has relevant doc changes.
- Benchstat does not show performance regression. - Benchstat does not show performance regression.
- Pull request is [labeled appropriately][pr-labels].
- Title will be understandable in the changelog.
1. Merge using "squash and merge". 1. Merge using "squash and merge".
2. Make sure to edit the commit message to keep all the useful information 2. Make sure to edit the commit message to keep all the useful information
@@ -165,25 +163,13 @@ Checklist:
### New release ### New release
1. Decide on the next version number. Use semver. 1. Go to [releases][releases]. Click on "X commits to master since this
2. Generate release notes using [`gh`][gh]. Example: release".
``` 2. Make note of all the changes. Look for backward incompatible changes,
$ gh api -X POST \ new features, and bug fixes.
-F tag_name='v2.0.0-beta.5' \ 3. Pick the new version using the above and semver.
-F target_commitish='v2' \ 4. Create a [new release][new-release].
-F previous_tag_name='v2.0.0-beta.4' \ 5. Follow the same format as [1.1.0][release-110].
--jq '.body' \
repos/pelletier/go-toml/releases/generate-notes
```
3. Look for "Other changes". That would indicate a pull request not labeled
properly. Tweak labels and pull request titles until changelog looks good for
users.
4. [Draft new release][new-release].
5. Fill tag and target with the same value used to generate the changelog.
6. Set title to the new tag value.
7. Paste the generated changelog.
8. Check "create discussion", in the "Releases" category.
9. Check pre-release if new version is an alpha or beta.
[issues-tracker]: https://github.com/pelletier/go-toml/issues [issues-tracker]: https://github.com/pelletier/go-toml/issues
[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md [bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
@@ -191,6 +177,6 @@ $ gh api -X POST \
[readme]: ./README.md [readme]: ./README.md
[fork]: https://help.github.com/articles/fork-a-repo [fork]: https://help.github.com/articles/fork-a-repo
[pull-request]: https://help.github.com/en/articles/creating-a-pull-request [pull-request]: https://help.github.com/en/articles/creating-a-pull-request
[releases]: https://github.com/pelletier/go-toml/releases
[new-release]: https://github.com/pelletier/go-toml/releases/new [new-release]: https://github.com/pelletier/go-toml/releases/new
[gh]: https://github.com/cli/cli [release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0
[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
-5
View File
@@ -1,5 +0,0 @@
FROM scratch
ENV PATH "$PATH:/bin"
COPY tomll /bin/tomll
COPY tomljson /bin/tomljson
COPY jsontoml /bin/jsontoml
+1 -1
View File
@@ -1,6 +1,6 @@
The MIT License (MIT) The MIT License (MIT)
Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
+34 -159
View File
@@ -4,14 +4,24 @@ Go library for the [TOML](https://toml.io/en/) format.
This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
## Development status
This is the upcoming major version of go-toml. It is currently in active
development. As of release v2.0.0-beta.1, the library has reached feature parity
with v1, and fixes a lot known bugs and performance issues along the way.
If you do not need the advanced document editing features of v1, you are
encouraged to try out this version.
[👉 Roadmap for v2](https://github.com/pelletier/go-toml/discussions/506)
[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) [🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
[💬 Anything else](https://github.com/pelletier/go-toml/discussions) [💬 Anything else](https://github.com/pelletier/go-toml/discussions)
## Documentation ## Documentation
Full API, examples, and implementation notes are available in the Go Full API, examples, and implementation notes are available in the Go documentation.
documentation.
[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2) [![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
@@ -38,16 +48,15 @@ operations should not be shockingly slow. See [benchmarks](#benchmarks).
### Strict mode ### Strict mode
`Decoder` can be set to "strict mode", which makes it error when some parts of `Decoder` can be set to "strict mode", which makes it error when some parts of
the TOML document was not present in the target structure. This is a great way the TOML document was not prevent in the target structure. This is a great way
to check for typos. [See example in the documentation][strict]. to check for typos. [See example in the documentation][strict].
[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields [strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.SetStrict
### Contextualized errors ### Contextualized errors
When most decoding errors occur, go-toml returns [`DecodeError`][decode-err]), When decoding errors occur, go-toml returns [`DecodeError`][decode-err]), which
which contains a human readable contextualized version of the error. For contains a human readable contextualized version of the error. For example:
example:
``` ```
2| key1 = "value1" 2| key1 = "value1"
@@ -150,11 +159,11 @@ Execution time speedup compared to other Go TOML libraries:
</thead> </thead>
<tbody> <tbody>
<tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr> <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
<tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr> <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.9x</td></tr>
<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr> <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.4x</td><td>2.6x</td></tr>
<tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr> <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.5x</td></tr>
<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr> <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.7x</td><td>2.6x</td></tr>
<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr> <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.8x</td><td>5.1x</td></tr>
</tbody> </tbody>
</table> </table>
<details><summary>See more</summary> <details><summary>See more</summary>
@@ -167,17 +176,17 @@ provided for completeness.</p>
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
</thead> </thead>
<tbody> <tbody>
<tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr> <tr><td>Marshal/SimpleDocument/map-2</td><td>1.7x</td><td>2.1x</td></tr>
<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr> <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.5x</td><td>2.8x</td></tr>
<tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr> <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.1x</td><td>3.1x</td></tr>
<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr> <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.4x</td><td>4.3x</td></tr>
<tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr> <tr><td>UnmarshalDataset/example-2</td><td>3.4x</td><td>3.2x</td></tr>
<tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr> <tr><td>UnmarshalDataset/code-2</td><td>2.2x</td><td>2.5x</td></tr>
<tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr> <tr><td>UnmarshalDataset/twitter-2</td><td>2.8x</td><td>2.7x</td></tr>
<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr> <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.0x</td></tr>
<tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr> <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.4x</td></tr>
<tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr> <tr><td>UnmarshalDataset/config-2</td><td>4.4x</td><td>2.9x</td></tr>
<tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr> <tr><td>[Geo mean]</td><td>2.8x</td><td>2.6x</td></tr>
</tbody> </tbody>
</table> </table>
<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p> <p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
@@ -197,44 +206,6 @@ In case of trouble: [Go Modules FAQ][mod-faq].
[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module [mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
## Tools
Go-toml provides three handy command line tools:
* `tomljson`: Reads a TOML file and outputs its JSON representation.
```
$ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
$ tomljson --help
```
* `jsontoml`: Reads a JSON file and outputs a TOML representation.
```
$ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
$ jsontoml --help
```
* `tomll`: Lints and reformats a TOML file.
```
$ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
$ tomll --help
```
### Docker image
Those tools are also available as a [Docker image][docker]. For example, to use
`tomljson`:
```
docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
```
Multiple versions are available on [ghcr.io][docker].
[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
## Migrating from v1 ## Migrating from v1
This section describes the differences between v1 and v2, with some pointers on This section describes the differences between v1 and v2, with some pointers on
@@ -353,29 +324,6 @@ The recommended replacement is pre-filling the struct before unmarshaling.
[go-defaults]: https://github.com/mcuadros/go-defaults [go-defaults]: https://github.com/mcuadros/go-defaults
#### `toml.Tree` replacement
This structure was the initial attempt at providing a document model for
go-toml. It allows manipulating the structure of any document, encoding and
decoding from their TOML representation. While a more robust feature was
initially planned in go-toml v2, this has been ultimately [removed from
scope][nodoc] of this library, with no plan to add it back at the moment. The
closest equivalent at the moment would be to unmarshal into an `interface{}` and
use type assertions and/or reflection to manipulate the arbitrary
structure. However this would fall short of providing all of the TOML features
such as adding comments and be specific about whitespace.
#### `toml.Position` are not retrievable anymore
The API for retrieving the position (line, column) of a specific TOML element do
not exist anymore. This was done to minimize the amount of concepts introduced
by the library (query path), and avoid the performance hit related to storing
positions in the absence of a document model, for a feature that seemed to have
little use. Errors however have gained more detailed position
information. Position retrieval seems better fitted for a document model, which
has been [removed from the scope][nodoc] of go-toml v2 at the moment.
### Encoding / Marshal ### Encoding / Marshal
#### Default struct fields order #### Default struct fields order
@@ -411,8 +359,7 @@ fmt.Println("v2:\n" + string(b))
``` ```
There is no way to make v2 encoder behave like v1. A workaround could be to There is no way to make v2 encoder behave like v1. A workaround could be to
manually sort the fields alphabetically in the struct definition, or generate manually sort the fields alphabetically in the struct definition.
struct types using `reflect.StructOf`.
#### No indentation by default #### No indentation by default
@@ -460,9 +407,7 @@ fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
V1 always uses double quotes (`"`) around strings and keys that cannot be V1 always uses double quotes (`"`) around strings and keys that cannot be
represented bare (unquoted). V2 uses single quotes instead by default (`'`), represented bare (unquoted). V2 uses single quotes instead by default (`'`),
unless a character cannot be represented, then falls back to double quotes. As a unless a character cannot be represented, then falls back to double quotes.
result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not
useful anymore.
There is no way to make v2 encoder behave like v1. There is no way to make v2 encoder behave like v1.
@@ -477,76 +422,6 @@ There is no way to make v2 encoder behave like v1.
[tm]: https://golang.org/pkg/encoding/#TextMarshaler [tm]: https://golang.org/pkg/encoding/#TextMarshaler
#### `Encoder.CompactComments` has been removed
Emitting compact comments is now the default behavior of go-toml. This option
is not necessary anymore.
#### Struct tags have been merged
V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
`toml`, `multiline`, and `omitempty`. For example:
```go
type doc struct {
// v1
F string `toml:"field" multiline:"true" omitempty:"true"`
// v2
F string `toml:"field,multiline,omitempty"`
}
```
As a result, the `Encoder.SetTag*` methods have been removed, as there is just
one tag now.
#### `commented` tag has been removed
There is no replacement for the `commented` tag. This feature would be better
suited in a proper document model for go-toml v2, which has been [cut from
scope][nodoc] at the moment.
#### `Encoder.ArraysWithOneElementPerLine` has been renamed
The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
#### `Encoder.Indentation` has been renamed
The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
#### Embedded structs behave like stdlib
V1 defaults to merging embedded struct fields into the embedding struct. This
behavior was unexpected because it does not follow the standard library. To
avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
added to make the encoder behave correctly. Given backward compatibility is not
a problem anymore, v2 does the right thing by default: it follows the behavior
of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
### `query`
go-toml v1 provided the [`go-toml/query`][query] package. It allowed to run
JSONPath-style queries on TOML files. This feature is not available in v2. For a
replacement, check out [dasel][dasel].
This package has been removed because it was essentially not supported anymore
(last commit May 2020), increased the complexity of the code base, and more
complete solutions exist out there.
[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
[dasel]: https://github.com/TomWright/dasel
## Versioning
Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
this document. The last two major versions of Go are supported
(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
## License ## License
The MIT License (MIT). Read [LICENSE](LICENSE). The MIT License (MIT). Read [LICENSE](LICENSE).
-1
View File
@@ -321,7 +321,6 @@ type benchmarkDoc struct {
Key1 []int64 Key1 []int64
Key2 []string Key2 []string
Key3 [][]int64 Key3 [][]int64
// TODO: Key4 not supported by go-toml's Unmarshal
Key4 []interface{} Key4 []interface{}
Key5 []int64 Key5 []int64
Key6 []int64 Key6 []int64
+5 -13
View File
@@ -76,8 +76,7 @@ cover() {
fi fi
pushd "$dir" pushd "$dir"
go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./... go test -covermode=atomic -coverprofile=coverage.out ./...
cat coverage.out.tmp | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out
go tool cover -func=coverage.out go tool cover -func=coverage.out
popd popd
@@ -104,23 +103,16 @@ coverage() {
echo "" echo ""
target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')" target_pct="$(cat ${target_out} |sed -E 's/.*total.*\t([0-9.]+)%/\1/;t;d')"
head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')" head_pct="$(cat ${head_out} |sed -E 's/.*total.*\t([0-9.]+)%/\1/;t;d')"
echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%" echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
delta_pct=$(echo "$head_pct - $target_pct" | bc -l) delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
echo "Delta: ${delta_pct}" echo "Delta: ${delta_pct}"
if [[ $delta_pct = \-* ]]; then if [[ $delta_pct = \-* ]]; then
echo "Regression!"; echo "Regression!";
return 1
target_diff="${output_dir}/target.diff.txt"
head_diff="${output_dir}/head.diff.txt"
cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
return 1
fi fi
return 0 return 0
;; ;;
+1 -1
View File
@@ -6,7 +6,7 @@ import (
"os" "os"
"path" "path"
"github.com/pelletier/go-toml/v2/internal/testsuite" "github.com/pelletier/go-toml/v2/testsuite"
) )
func main() { func main() {
-55
View File
@@ -1,55 +0,0 @@
// Package jsontoml is a program that converts JSON to TOML.
//
// # Usage
//
// Reading from stdin:
//
// cat file.json | jsontoml > file.toml
//
// Reading from a file:
//
// jsontoml file.json > file.toml
//
// # Installation
//
// Using Go:
//
// go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
package main
import (
"encoding/json"
"io"
"github.com/pelletier/go-toml/v2"
"github.com/pelletier/go-toml/v2/internal/cli"
)
const usage = `jsontoml can be used in two ways:
Reading from stdin:
cat file.json | jsontoml > file.toml
Reading from a file:
jsontoml file.json > file.toml
`
func main() {
p := cli.Program{
Usage: usage,
Fn: convert,
}
p.Execute()
}
func convert(r io.Reader, w io.Writer) error {
var v interface{}
d := json.NewDecoder(r)
err := d.Decode(&v)
if err != nil {
return err
}
e := toml.NewEncoder(w)
return e.Encode(v)
}
-48
View File
@@ -1,48 +0,0 @@
package main
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConvert(t *testing.T) {
examples := []struct {
name string
input string
expected string
errors bool
}{
{
name: "valid json",
input: `
{
"mytoml": {
"a": 42
}
}`,
expected: `[mytoml]
a = 42.0
`,
},
{
name: "invalid json",
input: `{ foo`,
errors: true,
},
}
for _, e := range examples {
b := new(bytes.Buffer)
err := convert(strings.NewReader(e.input), b)
if e.errors {
require.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, e.expected, b.String())
}
}
}
-63
View File
@@ -1,63 +0,0 @@
// Package tomljson is a program that converts TOML to JSON.
//
// # Usage
//
// Reading from stdin:
//
// cat file.toml | tomljson > file.json
//
// Reading from a file:
//
// tomljson file.toml > file.json
//
// # Installation
//
// Using Go:
//
// go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"github.com/pelletier/go-toml/v2"
"github.com/pelletier/go-toml/v2/internal/cli"
)
const usage = `tomljson can be used in two ways:
Reading from stdin:
cat file.toml | tomljson > file.json
Reading from a file:
tomljson file.toml > file.json
`
func main() {
p := cli.Program{
Usage: usage,
Fn: convert,
}
p.Execute()
}
func convert(r io.Reader, w io.Writer) error {
var v interface{}
d := toml.NewDecoder(r)
err := d.Decode(&v)
if err != nil {
var derr *toml.DecodeError
if errors.As(err, &derr) {
row, col := derr.Position()
return fmt.Errorf("%s\nerror occurred at row %d column %d", derr.String(), row, col)
}
return err
}
e := json.NewEncoder(w)
e.SetIndent("", " ")
return e.Encode(v)
}
-61
View File
@@ -1,61 +0,0 @@
package main
import (
"bytes"
"fmt"
"io"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConvert(t *testing.T) {
examples := []struct {
name string
input io.Reader
expected string
errors bool
}{
{
name: "valid toml",
input: strings.NewReader(`
[mytoml]
a = 42`),
expected: `{
"mytoml": {
"a": 42
}
}
`,
},
{
name: "invalid toml",
input: strings.NewReader(`bad = []]`),
errors: true,
},
{
name: "bad reader",
input: &badReader{},
errors: true,
},
}
for _, e := range examples {
b := new(bytes.Buffer)
err := convert(e.input, b)
if e.errors {
require.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, e.expected, b.String())
}
}
}
type badReader struct{}
func (r *badReader) Read([]byte) (int, error) {
return 0, fmt.Errorf("reader failed on purpose")
}
-58
View File
@@ -1,58 +0,0 @@
// Package tomll is a linter program for TOML.
//
// # Usage
//
// Reading from stdin, writing to stdout:
//
// cat file.toml | tomll
//
// Reading and updating a list of files in place:
//
// tomll a.toml b.toml c.toml
//
// # Installation
//
// Using Go:
//
// go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
package main
import (
"io"
"github.com/pelletier/go-toml/v2"
"github.com/pelletier/go-toml/v2/internal/cli"
)
const usage = `tomll can be used in two ways:
Reading from stdin, writing to stdout:
cat file.toml | tomll > file.toml
Reading and updating a list of files in place:
tomll a.toml b.toml c.toml
When given a list of files, tomll will modify all files in place without asking.
`
func main() {
p := cli.Program{
Usage: usage,
Fn: convert,
Inplace: true,
}
p.Execute()
}
func convert(r io.Reader, w io.Writer) error {
var v interface{}
d := toml.NewDecoder(r)
err := d.Decode(&v)
if err != nil {
return err
}
e := toml.NewEncoder(w)
return e.Encode(v)
}
-45
View File
@@ -1,45 +0,0 @@
package main
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConvert(t *testing.T) {
examples := []struct {
name string
input string
expected string
errors bool
}{
{
name: "valid toml",
input: `
mytoml.a = 42.0
`,
expected: `[mytoml]
a = 42.0
`,
},
{
name: "invalid toml",
input: `[what`,
errors: true,
},
}
for _, e := range examples {
b := new(bytes.Buffer)
err := convert(strings.NewReader(e.input), b)
if e.errors {
require.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, e.expected, b.String())
}
}
}
+1 -1
View File
@@ -3,7 +3,7 @@
// //
// Within the go-toml package, run `go generate`. Otherwise, use: // Within the go-toml package, run `go generate`. Otherwise, use:
// //
// go run github.com/pelletier/go-toml/cmd/tomltestgen -o toml_testgen_test.go // go run github.com/pelletier/go-toml/cmd/tomltestgen -o toml_testgen_test.go
package main package main
import ( import (
+26 -74
View File
@@ -99,42 +99,15 @@ func parseDateTime(b []byte) (time.Time, error) {
if len(b) != dateTimeByteLen { if len(b) != dateTimeByteLen {
return time.Time{}, newDecodeError(b, "invalid date-time timezone") return time.Time{}, newDecodeError(b, "invalid date-time timezone")
} }
var direction int direction := 1
switch b[0] { if b[0] == '-' {
case '-':
direction = -1 direction = -1
case '+':
direction = +1
default:
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character")
}
if b[3] != ':' {
return time.Time{}, newDecodeError(b[3:4], "expected a : separator")
}
hours, err := parseDecimalDigits(b[1:3])
if err != nil {
return time.Time{}, err
}
if hours > 23 {
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours")
}
minutes, err := parseDecimalDigits(b[4:6])
if err != nil {
return time.Time{}, err
}
if minutes > 59 {
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes")
} }
hours := digitsToInt(b[1:3])
minutes := digitsToInt(b[4:6])
seconds := direction * (hours*3600 + minutes*60) seconds := direction * (hours*3600 + minutes*60)
if seconds == 0 { zone = time.FixedZone("", seconds)
zone = time.UTC
} else {
zone = time.FixedZone("", seconds)
}
b = b[dateTimeByteLen:] b = b[dateTimeByteLen:]
} }
@@ -228,53 +201,45 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
return t, nil, err return t, nil, err
} }
if t.Second > 60 { if t.Second > 59 {
return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60") return t, nil, newDecodeError(b[3:5], "seconds cannot be greater 59")
} }
b = b[8:] const minLengthWithFrac = 9
if len(b) >= minLengthWithFrac && b[minLengthWithFrac-1] == '.' {
if len(b) >= 1 && b[0] == '.' {
frac := 0 frac := 0
precision := 0
digits := 0 digits := 0
for i, c := range b[1:] { for i, c := range b[minLengthWithFrac:] {
if !isDigit(c) { if !isDigit(c) {
if i == 0 { if i == 0 {
return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point") return t, nil, newDecodeError(b[i:i+1], "need at least one digit after fraction point")
} }
break break
} }
digits++
const maxFracPrecision = 9 const maxFracPrecision = 9
if i >= maxFracPrecision { if i >= maxFracPrecision {
// go-toml allows decoding fractional seconds return t, nil, newDecodeError(b[i:i+1], "maximum precision for date time is nanosecond")
// beyond the supported precision of 9
// digits. It truncates the fractional component
// to the supported precision and ignores the
// remaining digits.
//
// https://github.com/pelletier/go-toml/discussions/707
continue
} }
frac *= 10 frac *= 10
frac += int(c - '0') frac += int(c - '0')
precision++ digits++
} }
if precision == 0 { if digits == 0 {
return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit") return t, nil, newDecodeError(b[minLengthWithFrac-1:minLengthWithFrac], "nanoseconds need at least one digit")
} }
t.Nanosecond = frac * nspow[precision] t.Nanosecond = frac * nspow[digits]
t.Precision = precision t.Precision = digits
return t, b[1+digits:], nil return t, b[9+digits:], nil
} }
return t, b, nil
return t, b[8:], nil
} }
//nolint:cyclop //nolint:cyclop
@@ -313,10 +278,10 @@ func parseFloat(b []byte) (float64, error) {
} }
start := 0 start := 0
if cleaned[0] == '+' || cleaned[0] == '-' { if b[0] == '+' || b[0] == '-' {
start = 1 start = 1
} }
if cleaned[start] == '0' && isDigit(cleaned[start+1]) { if b[start] == '0' && isDigit(b[start+1]) {
return 0, newDecodeError(b, "float integer part cannot have leading zeroes") return 0, newDecodeError(b, "float integer part cannot have leading zeroes")
} }
@@ -399,17 +364,8 @@ func parseIntDec(b []byte) (int64, error) {
} }
func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
start := 0 if b[0] == '_' {
if b[start] == '+' || b[start] == '-' { return nil, newDecodeError(b[0:1], "number cannot start with underscore")
start++
}
if len(b) == start {
return b, nil
}
if b[start] == '_' {
return nil, newDecodeError(b[start:start+1], "number cannot start with underscore")
} }
if b[len(b)-1] == '_' { if b[len(b)-1] == '_' {
@@ -482,10 +438,6 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent") return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent")
} }
before = false before = false
case '+', '-':
// signed exponents
cleaned = append(cleaned, c)
before = false
case 'e', 'E': case 'e', 'E':
if i < len(b)-1 && b[i+1] == '_' { if i < len(b)-1 && b[i+1] == '_' {
return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent") return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent")
@@ -510,7 +462,7 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
// isValidDate checks if a provided date is a date that exists. // isValidDate checks if a provided date is a date that exists.
func isValidDate(year int, month int, day int) bool { func isValidDate(year int, month int, day int) bool {
return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) return day <= daysIn(month, year)
} }
// daysBefore[m] counts the number of days in a non-leap year // daysBefore[m] counts the number of days in a non-leap year
+1 -2
View File
@@ -27,7 +27,7 @@ type DecodeError struct {
// corresponding field in the target value. It contains all the missing fields // corresponding field in the target value. It contains all the missing fields
// in Errors. // in Errors.
// //
// Emitted by Decoder when DisallowUnknownFields() was called. // Emitted by Decoder when SetStrict(true) was called.
type StrictMissingError struct { type StrictMissingError struct {
// One error per field that could not be found. // One error per field that could not be found.
Errors []DecodeError Errors []DecodeError
@@ -103,7 +103,6 @@ func (e *DecodeError) Key() Key {
// //
// The function copies all bytes used in DecodeError, so that document and // The function copies all bytes used in DecodeError, so that document and
// highlight can be freely deallocated. // highlight can be freely deallocated.
//
//nolint:funlen //nolint:funlen
func wrapDecodeError(document []byte, de *decodeError) *DecodeError { func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
offset := danger.SubsliceOffset(document, de.highlight) offset := danger.SubsliceOffset(document, de.highlight)
+6 -6
View File
@@ -212,12 +212,12 @@ func ExampleDecodeError() {
fmt.Println(err) fmt.Println(err)
var derr *DecodeError //nolint:errorlint
if errors.As(err, &derr) { de := err.(*DecodeError)
fmt.Println(derr.String()) fmt.Println(de.String())
row, col := derr.Position()
fmt.Println("error occurred at row", row, "column", col) row, col := de.Position()
} fmt.Println("error occurred at row", row, "column", col)
// Output: // Output:
// toml: number must have at least one digit between underscores // toml: number must have at least one digit between underscores
// 1| name = 123__456 // 1| name = 123__456
-56
View File
@@ -1,56 +0,0 @@
//go:build go1.18 || go1.19
// +build go1.18 go1.19
package toml_test
import (
"io/ioutil"
"strings"
"testing"
"github.com/pelletier/go-toml/v2"
"github.com/stretchr/testify/require"
)
func FuzzUnmarshal(f *testing.F) {
file, err := ioutil.ReadFile("benchmark/benchmark.toml")
if err != nil {
panic(err)
}
f.Add(file)
f.Fuzz(func(t *testing.T, b []byte) {
if strings.Contains(string(b), "nan") {
// Current limitation of testify.
// https://github.com/stretchr/testify/issues/624
t.Skip("can't compare NaNs")
}
t.Log("INITIAL DOCUMENT ===========================")
t.Log(string(b))
var v interface{}
err := toml.Unmarshal(b, &v)
if err != nil {
return
}
t.Log("DECODED VALUE ===========================")
t.Logf("%#+v", v)
encoded, err := toml.Marshal(v)
if err != nil {
t.Fatalf("cannot marshal unmarshaled document: %s", err)
}
t.Log("ENCODED DOCUMENT ===========================")
t.Log(string(encoded))
var v2 interface{}
err = toml.Unmarshal(encoded, &v2)
if err != nil {
t.Fatalf("failed round trip: %s", err)
}
require.Equal(t, v, v2)
})
}
+2 -1
View File
@@ -2,4 +2,5 @@ module github.com/pelletier/go-toml/v2
go 1.16 go 1.16
require github.com/stretchr/testify v1.8.0 // latest (v1.7.0) doesn't have the fix for time.Time
require github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942
+4 -8
View File
@@ -1,15 +1,11 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942 h1:t0lM6y/M5IiUZyvbBTcngso8SZEZICH7is9B6g/obVU=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+29 -28
View File
@@ -11,17 +11,17 @@ import (
// //
// For example: // For example:
// //
// it := n.Children() // it := n.Children()
// for it.Next() { // for it.Next() {
// it.Node() // it.Node()
// } // }
type Iterator struct { type Iterator struct {
started bool started bool
node *Node node *Node
} }
// Next moves the iterator forward and returns true if points to a // Next moves the iterator forward and returns true if points to a node, false
// node, false otherwise. // otherwise.
func (c *Iterator) Next() bool { func (c *Iterator) Next() bool {
if !c.started { if !c.started {
c.started = true c.started = true
@@ -31,8 +31,8 @@ func (c *Iterator) Next() bool {
return c.node.Valid() return c.node.Valid()
} }
// IsLast returns true if the current node of the iterator is the last // IsLast returns true if the current node of the iterator is the last one.
// one. Subsequent call to Next() will return false. // Subsequent call to Next() will return false.
func (c *Iterator) IsLast() bool { func (c *Iterator) IsLast() bool {
return c.node.next == 0 return c.node.next == 0
} }
@@ -62,20 +62,20 @@ func (r *Root) at(idx Reference) *Node {
return &r.nodes[idx] return &r.nodes[idx]
} }
// Arrays have one child per element in the array. InlineTables have // Arrays have one child per element in the array.
// one child per key-value pair in the table. KeyValues have at least // InlineTables have one child per key-value pair in the table.
// two children. The first one is the value. The rest make a // KeyValues have at least two children. The first one is the value. The
// potentially dotted key. Table and Array table have one child per // rest make a potentially dotted key.
// element of the key they represent (same as KeyValue, but without // Table and Array table have one child per element of the key they
// the last node being the value). // represent (same as KeyValue, but without the last node being the value).
// children []Node
type Node struct { type Node struct {
Kind Kind Kind Kind
Raw Range // Raw bytes from the input. Raw Range // Raw bytes from the input.
Data []byte // Node value (either allocated or referencing the input). Data []byte // Node value (could be either allocated or referencing the input).
// References to other nodes, as offsets in the backing array // References to other nodes, as offsets in the backing array from this
// from this node. References can go backward, so those can be // node. References can go backward, so those can be negative.
// negative.
next int // 0 if last element next int // 0 if last element
child int // 0 if no child child int // 0 if no child
} }
@@ -85,8 +85,8 @@ type Range struct {
Length uint32 Length uint32
} }
// Next returns a copy of the next node, or an invalid Node if there // Next returns a copy of the next node, or an invalid Node if there is no
// is no next node. // next node.
func (n *Node) Next() *Node { func (n *Node) Next() *Node {
if n.next == 0 { if n.next == 0 {
return nil return nil
@@ -96,9 +96,9 @@ func (n *Node) Next() *Node {
return (*Node)(danger.Stride(ptr, size, n.next)) return (*Node)(danger.Stride(ptr, size, n.next))
} }
// Child returns a copy of the first child node of this node. Other // Child returns a copy of the first child node of this node. Other children
// children can be accessed calling Next on the first child. Returns // can be accessed calling Next on the first child.
// an invalid Node if there is none. // Returns an invalid Node if there is none.
func (n *Node) Child() *Node { func (n *Node) Child() *Node {
if n.child == 0 { if n.child == 0 {
return nil return nil
@@ -113,9 +113,10 @@ func (n *Node) Valid() bool {
return n != nil return n != nil
} }
// Key returns the child nodes making the Key on a supported // Key returns the child nodes making the Key on a supported node. Panics
// node. Panics otherwise. They are guaranteed to be all be of the // otherwise.
// Kind Key. A simple key would return just one element. // They are guaranteed to be all be of the Kind Key. A simple key would return
// just one element.
func (n *Node) Key() Iterator { func (n *Node) Key() Iterator {
switch n.Kind { switch n.Kind {
case KeyValue: case KeyValue:
@@ -132,8 +133,8 @@ func (n *Node) Key() Iterator {
} }
// Value returns a pointer to the value node of a KeyValue. // Value returns a pointer to the value node of a KeyValue.
// Guaranteed to be non-nil. Panics if not called on a KeyValue node, // Guaranteed to be non-nil.
// or if the Children are malformed. // Panics if not called on a KeyValue node, or if the Children are malformed.
func (n *Node) Value() *Node { func (n *Node) Value() *Node {
return n.Child() return n.Child()
} }
-88
View File
@@ -1,88 +0,0 @@
package cli
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/pelletier/go-toml/v2"
)
type ConvertFn func(r io.Reader, w io.Writer) error
type Program struct {
Usage string
Fn ConvertFn
// Inplace allows the command to take more than one file as argument and
// perform convertion in place on each provided file.
Inplace bool
}
func (p *Program) Execute() {
flag.Usage = func() { fmt.Fprintf(os.Stderr, p.Usage) }
flag.Parse()
os.Exit(p.main(flag.Args(), os.Stdin, os.Stdout, os.Stderr))
}
func (p *Program) main(files []string, input io.Reader, output, error io.Writer) int {
err := p.run(files, input, output)
if err != nil {
var derr *toml.DecodeError
if errors.As(err, &derr) {
fmt.Fprintln(error, derr.String())
row, col := derr.Position()
fmt.Fprintln(error, "error occurred at row", row, "column", col)
} else {
fmt.Fprintln(error, err.Error())
}
return -1
}
return 0
}
func (p *Program) run(files []string, input io.Reader, output io.Writer) error {
if len(files) > 0 {
if p.Inplace {
return p.runAllFilesInPlace(files)
}
f, err := os.Open(files[0])
if err != nil {
return err
}
defer f.Close()
input = f
}
return p.Fn(input, output)
}
func (p *Program) runAllFilesInPlace(files []string) error {
for _, path := range files {
err := p.runFileInPlace(path)
if err != nil {
return err
}
}
return nil
}
func (p *Program) runFileInPlace(path string) error {
in, err := ioutil.ReadFile(path)
if err != nil {
return err
}
out := new(bytes.Buffer)
err = p.Fn(bytes.NewReader(in), out)
if err != nil {
return err
}
return ioutil.WriteFile(path, out.Bytes(), 0600)
}
-172
View File
@@ -1,172 +0,0 @@
package cli
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"github.com/pelletier/go-toml/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func processMain(args []string, input io.Reader, stdout, stderr io.Writer, f ConvertFn) int {
p := Program{Fn: f}
return p.main(args, input, stdout, stderr)
}
func TestProcessMainStdin(t *testing.T) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
input := strings.NewReader("this is the input")
exit := processMain([]string{}, input, stdout, stderr, func(r io.Reader, w io.Writer) error {
return nil
})
assert.Equal(t, 0, exit)
assert.Empty(t, stdout.String())
assert.Empty(t, stderr.String())
}
func TestProcessMainStdinErr(t *testing.T) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
input := strings.NewReader("this is the input")
exit := processMain([]string{}, input, stdout, stderr, func(r io.Reader, w io.Writer) error {
return fmt.Errorf("something bad")
})
assert.Equal(t, -1, exit)
assert.Empty(t, stdout.String())
assert.NotEmpty(t, stderr.String())
}
func TestProcessMainStdinDecodeErr(t *testing.T) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
input := strings.NewReader("this is the input")
exit := processMain([]string{}, input, stdout, stderr, func(r io.Reader, w io.Writer) error {
var v interface{}
return toml.Unmarshal([]byte(`qwe = 001`), &v)
})
assert.Equal(t, -1, exit)
assert.Empty(t, stdout.String())
assert.Contains(t, stderr.String(), "error occurred at")
}
func TestProcessMainFileExists(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "example")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
_, err = tmpfile.Write([]byte(`some data`))
require.NoError(t, err)
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
exit := processMain([]string{tmpfile.Name()}, nil, stdout, stderr, func(r io.Reader, w io.Writer) error {
return nil
})
assert.Equal(t, 0, exit)
assert.Empty(t, stdout.String())
assert.Empty(t, stderr.String())
}
func TestProcessMainFileDoesNotExist(t *testing.T) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
exit := processMain([]string{"/lets/hope/this/does/not/exist"}, nil, stdout, stderr, func(r io.Reader, w io.Writer) error {
return nil
})
assert.Equal(t, -1, exit)
assert.Empty(t, stdout.String())
assert.NotEmpty(t, stderr.String())
}
func TestProcessMainFilesInPlace(t *testing.T) {
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
path1 := path.Join(dir, "file1")
path2 := path.Join(dir, "file2")
err = ioutil.WriteFile(path1, []byte("content 1"), 0600)
require.NoError(t, err)
err = ioutil.WriteFile(path2, []byte("content 2"), 0600)
require.NoError(t, err)
p := Program{
Fn: dummyFileFn,
Inplace: true,
}
exit := p.main([]string{path1, path2}, os.Stdin, os.Stdout, os.Stderr)
require.Equal(t, 0, exit)
v1, err := ioutil.ReadFile(path1)
require.NoError(t, err)
require.Equal(t, "1", string(v1))
v2, err := ioutil.ReadFile(path2)
require.NoError(t, err)
require.Equal(t, "2", string(v2))
}
func TestProcessMainFilesInPlaceErrRead(t *testing.T) {
p := Program{
Fn: dummyFileFn,
Inplace: true,
}
exit := p.main([]string{"/this/path/is/invalid"}, os.Stdin, os.Stdout, os.Stderr)
require.Equal(t, -1, exit)
}
func TestProcessMainFilesInPlaceFailFn(t *testing.T) {
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
path1 := path.Join(dir, "file1")
err = ioutil.WriteFile(path1, []byte("content 1"), 0600)
require.NoError(t, err)
p := Program{
Fn: func(io.Reader, io.Writer) error { return fmt.Errorf("oh no") },
Inplace: true,
}
exit := p.main([]string{path1}, os.Stdin, os.Stdout, os.Stderr)
require.Equal(t, -1, exit)
v1, err := ioutil.ReadFile(path1)
require.NoError(t, err)
require.Equal(t, "content 1", string(v1))
}
func dummyFileFn(r io.Reader, w io.Writer) error {
b, err := ioutil.ReadAll(r)
if err != nil {
return err
}
v := strings.SplitN(string(b), " ", 2)[1]
_, err = w.Write([]byte(v))
return err
}
+11
View File
@@ -63,3 +63,14 @@ func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
// https://github.com/golang/go/issues/40481 // https://github.com/golang/go/issues/40481
return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
} }
type Slice struct {
Data unsafe.Pointer
Len int
Cap int
}
type iface struct {
typ unsafe.Pointer
ptr unsafe.Pointer
}
+20
View File
@@ -0,0 +1,20 @@
//go:build go1.18
// +build go1.18
package danger
import (
"reflect"
"unsafe"
)
func ExtendSlice(t reflect.Type, s *Slice, n int) Slice {
arrayType := reflect.ArrayOf(n, t.Elem())
arrayData := reflect.New(arrayType)
reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem())
return Slice{
Data: unsafe.Pointer(arrayData.Pointer()),
Len: s.Len,
Cap: n,
}
}
+30
View File
@@ -0,0 +1,30 @@
//go:build !go1.18
// +build !go1.18
package danger
import (
"reflect"
"unsafe"
)
//go:linkname unsafe_NewArray reflect.unsafe_NewArray
func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src Slice) int
func ExtendSlice(t reflect.Type, s *Slice, n int) Slice {
elemTypeRef := t.Elem()
elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr
d := Slice{
Data: unsafe_NewArray(elemTypePtr, n),
Len: s.Len,
Cap: n,
}
typedslicecopy(elemTypePtr, d, *s)
return d
}
@@ -67,7 +67,6 @@ func TestDocMarshal(t *testing.T) {
} }
marshalTestToml := `title = 'TOML Marshal Testing' marshalTestToml := `title = 'TOML Marshal Testing'
[basic_lists] [basic_lists]
floats = [12.3, 45.6, 78.9] floats = [12.3, 45.6, 78.9]
bools = [true, false, true] bools = [true, false, true]
@@ -90,6 +89,7 @@ name = 'Second'
[subdoc.first] [subdoc.first]
name = 'First' name = 'First'
[basic] [basic]
uint = 5001 uint = 5001
bool = true bool = true
@@ -101,9 +101,9 @@ date = 1979-05-27T07:32:00Z
[[subdoclist]] [[subdoclist]]
name = 'List.First' name = 'List.First'
[[subdoclist]] [[subdoclist]]
name = 'List.Second' name = 'List.Second'
` `
result, err := toml.Marshal(docData) result, err := toml.Marshal(docData)
@@ -117,15 +117,14 @@ func TestBasicMarshalQuotedKey(t *testing.T) {
expected := `'Z.string-àéù' = 'Hello' expected := `'Z.string-àéù' = 'Hello'
'Yfloat-𝟘' = 3.5 'Yfloat-𝟘' = 3.5
['Xsubdoc-àéù'] ['Xsubdoc-àéù']
String2 = 'One' String2 = 'One'
[['W.sublist-𝟘']] [['W.sublist-𝟘']]
String2 = 'Two' String2 = 'Two'
[['W.sublist-𝟘']] [['W.sublist-𝟘']]
String2 = 'Three' String2 = 'Three'
` `
require.Equal(t, string(expected), string(result)) require.Equal(t, string(expected), string(result))
@@ -160,8 +159,8 @@ bool = false
int = 0 int = 0
string = '' string = ''
stringlist = [] stringlist = []
[map] [map]
` `
require.Equal(t, string(expected), string(result)) require.Equal(t, string(expected), string(result))
@@ -151,7 +151,6 @@ type quotedKeyMarshalTestStruct struct {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var quotedKeyMarshalTestData = quotedKeyMarshalTestStruct{ var quotedKeyMarshalTestData = quotedKeyMarshalTestStruct{
String: "Hello", String: "Hello",
@@ -161,7 +160,6 @@ var quotedKeyMarshalTestData = quotedKeyMarshalTestStruct{
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var quotedKeyMarshalTestToml = []byte(`"Yfloat-𝟘" = 3.5 var quotedKeyMarshalTestToml = []byte(`"Yfloat-𝟘" = 3.5
"Z.string-àéù" = "Hello" "Z.string-àéù" = "Hello"
@@ -274,7 +272,6 @@ var docData = testDoc{
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var mapTestDoc = testMapDoc{ var mapTestDoc = testMapDoc{
Title: "TOML Marshal Testing", Title: "TOML Marshal Testing",
@@ -460,6 +457,35 @@ func TestEmptytomlUnmarshal(t *testing.T) {
assert.Equal(t, emptyTestData, result) assert.Equal(t, emptyTestData, result)
} }
func TestEmptyUnmarshalOmit(t *testing.T) {
t.Skipf("Have not figured yet if omitempty is a good idea")
type emptyMarshalTestStruct2 struct {
Title string `toml:"title"`
Bool bool `toml:"bool,omitempty"`
Int int `toml:"int, omitempty"`
String string `toml:"string,omitempty "`
StringList []string `toml:"stringlist,omitempty"`
Ptr *basicMarshalTestStruct `toml:"ptr,omitempty"`
Map map[string]string `toml:"map,omitempty"`
}
emptyTestData2 := emptyMarshalTestStruct2{
Title: "Placeholder",
Bool: false,
Int: 0,
String: "",
StringList: []string{},
Ptr: nil,
Map: map[string]string{},
}
result := emptyMarshalTestStruct2{}
err := toml.Unmarshal(emptyTestToml, &result)
require.NoError(t, err)
assert.Equal(t, emptyTestData2, result)
}
type pointerMarshalTestStruct struct { type pointerMarshalTestStruct struct {
Str *string Str *string
List *[]string List *[]string
@@ -562,12 +588,10 @@ func (c customMarshaler) MarshalTOML() ([]byte, error) {
var customMarshalerData = customMarshaler{FirstName: "Sally", LastName: "Fields"} var customMarshalerData = customMarshaler{FirstName: "Sally", LastName: "Fields"}
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var customMarshalerToml = []byte(`Sally Fields`) var customMarshalerToml = []byte(`Sally Fields`)
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var nestedCustomMarshalerData = customMarshalerParent{ var nestedCustomMarshalerData = customMarshalerParent{
Self: customMarshaler{FirstName: "Maiku", LastName: "Suteda"}, Self: customMarshaler{FirstName: "Maiku", LastName: "Suteda"},
@@ -575,7 +599,6 @@ var nestedCustomMarshalerData = customMarshalerParent{
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var nestedCustomMarshalerToml = []byte(`friends = ["Sally Fields"] var nestedCustomMarshalerToml = []byte(`friends = ["Sally Fields"]
me = "Maiku Suteda" me = "Maiku Suteda"
@@ -617,7 +640,6 @@ func TestUnmarshalTextMarshaler(t *testing.T) {
} }
// TODO: Remove nolint once type and methods are used by a test // TODO: Remove nolint once type and methods are used by a test
//
//nolint:unused //nolint:unused
type precedentMarshaler struct { type precedentMarshaler struct {
FirstName string FirstName string
@@ -636,7 +658,6 @@ func (m precedentMarshaler) MarshalTOML() ([]byte, error) {
} }
// TODO: Remove nolint once type and method are used by a test // TODO: Remove nolint once type and method are used by a test
//
//nolint:unused //nolint:unused
type customPointerMarshaler struct { type customPointerMarshaler struct {
FirstName string FirstName string
@@ -649,7 +670,6 @@ func (m *customPointerMarshaler) MarshalTOML() ([]byte, error) {
} }
// TODO: Remove nolint once type and method are used by a test // TODO: Remove nolint once type and method are used by a test
//
//nolint:unused //nolint:unused
type textPointerMarshaler struct { type textPointerMarshaler struct {
FirstName string FirstName string
@@ -662,7 +682,6 @@ func (m *textPointerMarshaler) MarshalText() ([]byte, error) {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var commentTestToml = []byte(` var commentTestToml = []byte(`
# it's a comment on type # it's a comment on type
@@ -700,7 +719,6 @@ type mapsTestStruct struct {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var mapsTestData = mapsTestStruct{ var mapsTestData = mapsTestStruct{
Simple: map[string]string{ Simple: map[string]string{
@@ -724,7 +742,6 @@ var mapsTestData = mapsTestStruct{
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var mapsTestToml = []byte(` var mapsTestToml = []byte(`
[Other] [Other]
@@ -747,7 +764,6 @@ var mapsTestToml = []byte(`
`) `)
// TODO: Remove nolint once type is used by a test // TODO: Remove nolint once type is used by a test
//
//nolint:deadcode,unused //nolint:deadcode,unused
type structArrayNoTag struct { type structArrayNoTag struct {
A struct { A struct {
@@ -757,7 +773,6 @@ type structArrayNoTag struct {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var customTagTestToml = []byte(` var customTagTestToml = []byte(`
[postgres] [postgres]
@@ -772,7 +787,6 @@ var customTagTestToml = []byte(`
`) `)
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var customCommentTagTestToml = []byte(` var customCommentTagTestToml = []byte(`
# db connection # db connection
@@ -786,7 +800,6 @@ var customCommentTagTestToml = []byte(`
`) `)
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var customCommentedTagTestToml = []byte(` var customCommentedTagTestToml = []byte(`
[postgres] [postgres]
@@ -841,7 +854,6 @@ func TestUnmarshalTabInStringAndQuotedKey(t *testing.T) {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var customMultilineTagTestToml = []byte(`int_slice = [ var customMultilineTagTestToml = []byte(`int_slice = [
1, 1,
@@ -851,7 +863,6 @@ var customMultilineTagTestToml = []byte(`int_slice = [
`) `)
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var testDocBasicToml = []byte(` var testDocBasicToml = []byte(`
[document] [document]
@@ -864,14 +875,12 @@ var testDocBasicToml = []byte(`
`) `)
// TODO: Remove nolint once type is used by a test // TODO: Remove nolint once type is used by a test
//
//nolint:deadcode //nolint:deadcode
type testDocCustomTag struct { type testDocCustomTag struct {
Doc testDocBasicsCustomTag `file:"document"` Doc testDocBasicsCustomTag `file:"document"`
} }
// TODO: Remove nolint once type is used by a test // TODO: Remove nolint once type is used by a test
//
//nolint:deadcode //nolint:deadcode
type testDocBasicsCustomTag struct { type testDocBasicsCustomTag struct {
Bool bool `file:"bool_val"` Bool bool `file:"bool_val"`
@@ -884,7 +893,6 @@ type testDocBasicsCustomTag struct {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,varcheck //nolint:deadcode,varcheck
var testDocCustomTagData = testDocCustomTag{ var testDocCustomTagData = testDocCustomTag{
Doc: testDocBasicsCustomTag{ Doc: testDocBasicsCustomTag{
@@ -948,29 +956,6 @@ func TestUnmarshalMapWithTypedKey(t *testing.T) {
} }
} }
func TestUnmarshalTypeTableHeader(t *testing.T) {
testToml := []byte(`
[test]
a = 1
`)
type header string
var result map[header]map[string]int
err := toml.Unmarshal(testToml, &result)
if err != nil {
t.Errorf("Received unexpected error: %s", err)
return
}
expected := map[header]map[string]int{
"test": map[string]int{"a": 1},
}
if !reflect.DeepEqual(result, expected) {
t.Errorf("Bad unmarshal: expected %v, got %v", expected, result)
}
}
func TestUnmarshalNonPointer(t *testing.T) { func TestUnmarshalNonPointer(t *testing.T) {
a := 1 a := 1
err := toml.Unmarshal([]byte{}, a) err := toml.Unmarshal([]byte{}, a)
@@ -987,7 +972,6 @@ func TestUnmarshalInvalidPointerKind(t *testing.T) {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused //nolint:deadcode,unused
type testDuration struct { type testDuration struct {
Nanosec time.Duration `toml:"nanosec"` Nanosec time.Duration `toml:"nanosec"`
@@ -1002,7 +986,6 @@ type testDuration struct {
} }
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var testDurationToml = []byte(` var testDurationToml = []byte(`
nanosec = "1ns" nanosec = "1ns"
@@ -1017,7 +1000,6 @@ a_string = "15s"
`) `)
// TODO: Remove nolint once var is used by a test // TODO: Remove nolint once var is used by a test
//
//nolint:deadcode,unused,varcheck //nolint:deadcode,unused,varcheck
var testDurationToml2 = []byte(`a_string = "15s" var testDurationToml2 = []byte(`a_string = "15s"
hour = "1h0m0s" hour = "1h0m0s"
@@ -1031,7 +1013,6 @@ sec = "1s"
`) `)
// TODO: Remove nolint once type is used by a test // TODO: Remove nolint once type is used by a test
//
//nolint:deadcode,unused //nolint:deadcode,unused
type testBadDuration struct { type testBadDuration struct {
Val time.Duration `toml:"val"` Val time.Duration `toml:"val"`
@@ -1973,7 +1954,7 @@ func decoder(doc string) *toml.Decoder {
func strictDecoder(doc string) *toml.Decoder { func strictDecoder(doc string) *toml.Decoder {
d := decoder(doc) d := decoder(doc)
d.DisallowUnknownFields() d.SetStrict(true)
return d return d
} }
+76 -128
View File
@@ -3,7 +3,6 @@ package tracker
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"sync"
"github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/ast"
) )
@@ -55,108 +54,69 @@ func (k keyKind) String() string {
type SeenTracker struct { type SeenTracker struct {
entries []entry entries []entry
currentIdx int currentIdx int
} nextID int
var pool sync.Pool
func (s *SeenTracker) reset() {
// Always contains a root element at index 0.
s.currentIdx = 0
if len(s.entries) == 0 {
s.entries = make([]entry, 1, 2)
} else {
s.entries = s.entries[:1]
}
s.entries[0].child = -1
s.entries[0].next = -1
} }
type entry struct { type entry struct {
// Use -1 to indicate no child or no sibling. id int
child int parent int
next int
name []byte name []byte
kind keyKind kind keyKind
explicit bool explicit bool
kv bool
}
// Find the index of the child of parentIdx with key k. Returns -1 if
// it does not exist.
func (s *SeenTracker) find(parentIdx int, k []byte) int {
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
if bytes.Equal(s.entries[i].name, k) {
return i
}
}
return -1
} }
// Remove all descendants of node at position idx. // Remove all descendants of node at position idx.
func (s *SeenTracker) clear(idx int) { func (s *SeenTracker) clear(idx int) {
if idx >= len(s.entries) { p := s.entries[idx].id
return rest := clear(p, s.entries[idx+1:])
} s.entries = s.entries[:idx+1+len(rest)]
for i := s.entries[idx].child; i >= 0; {
next := s.entries[i].next
n := s.entries[0].next
s.entries[0].next = i
s.entries[i].next = n
s.entries[i].name = nil
s.clear(i)
i = next
}
s.entries[idx].child = -1
} }
func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { func clear(parentID int, entries []entry) []entry {
e := entry{ for i := 0; i < len(entries); {
child: -1, if entries[i].parent == parentID {
next: s.entries[parentIdx].child, id := entries[i].id
copy(entries[i:], entries[i+1:])
entries = entries[:len(entries)-1]
rest := clear(id, entries[i:])
entries = entries[:i+len(rest)]
} else {
i++
}
}
return entries
}
func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool) int {
parentID := s.id(parentIdx)
idx := len(s.entries)
s.entries = append(s.entries, entry{
id: s.nextID,
parent: parentID,
name: name, name: name,
kind: kind, kind: kind,
explicit: explicit, explicit: explicit,
kv: kv, })
} s.nextID++
var idx int
if s.entries[0].next >= 0 {
idx = s.entries[0].next
s.entries[0].next = s.entries[idx].next
s.entries[idx] = e
} else {
idx = len(s.entries)
s.entries = append(s.entries, e)
}
s.entries[parentIdx].child = idx
return idx return idx
} }
func (s *SeenTracker) setExplicitFlag(parentIdx int) {
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
if s.entries[i].kv {
s.entries[i].explicit = true
s.entries[i].kv = false
}
s.setExplicitFlag(i)
}
}
// CheckExpression takes a top-level node and checks that it does not contain // CheckExpression takes a top-level node and checks that it does not contain
// keys that have been seen in previous calls, and validates that types are // keys that have been seen in previous calls, and validates that types are
// consistent. // consistent.
func (s *SeenTracker) CheckExpression(node *ast.Node) error { func (s *SeenTracker) CheckExpression(node *ast.Node) error {
if s.entries == nil { if s.entries == nil {
s.reset() // Skip ID = 0 to remove the confusion between nodes whose
// parent has id 0 and root nodes (parent id is 0 because it's
// the zero value).
s.nextID = 1
// Start unscoped, so idx is negative.
s.currentIdx = -1
} }
switch node.Kind { switch node.Kind {
case ast.KeyValue: case ast.KeyValue:
return s.checkKeyValue(node) return s.checkKeyValue(s.currentIdx, node)
case ast.Table: case ast.Table:
return s.checkTable(node) return s.checkTable(node)
case ast.ArrayTable: case ast.ArrayTable:
@@ -167,13 +127,9 @@ func (s *SeenTracker) CheckExpression(node *ast.Node) error {
} }
func (s *SeenTracker) checkTable(node *ast.Node) error { func (s *SeenTracker) checkTable(node *ast.Node) error {
if s.currentIdx >= 0 {
s.setExplicitFlag(s.currentIdx)
}
it := node.Key() it := node.Key()
parentIdx := 0 parentIdx := -1
// This code is duplicated in checkArrayTable. This is because factoring // This code is duplicated in checkArrayTable. This is because factoring
// it in a function requires to copy the iterator, or allocate it to the // it in a function requires to copy the iterator, or allocate it to the
@@ -188,12 +144,7 @@ func (s *SeenTracker) checkTable(node *ast.Node) error {
idx := s.find(parentIdx, k) idx := s.find(parentIdx, k)
if idx < 0 { if idx < 0 {
idx = s.create(parentIdx, k, tableKind, false, false) idx = s.create(parentIdx, k, tableKind, false)
} else {
entry := s.entries[idx]
if entry.kind == valueKind {
return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
}
} }
parentIdx = idx parentIdx = idx
} }
@@ -211,7 +162,7 @@ func (s *SeenTracker) checkTable(node *ast.Node) error {
} }
s.entries[idx].explicit = true s.entries[idx].explicit = true
} else { } else {
idx = s.create(parentIdx, k, tableKind, true, false) idx = s.create(parentIdx, k, tableKind, true)
} }
s.currentIdx = idx s.currentIdx = idx
@@ -220,13 +171,9 @@ func (s *SeenTracker) checkTable(node *ast.Node) error {
} }
func (s *SeenTracker) checkArrayTable(node *ast.Node) error { func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
if s.currentIdx >= 0 {
s.setExplicitFlag(s.currentIdx)
}
it := node.Key() it := node.Key()
parentIdx := 0 parentIdx := -1
for it.Next() { for it.Next() {
if it.IsLast() { if it.IsLast() {
@@ -238,14 +185,8 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
idx := s.find(parentIdx, k) idx := s.find(parentIdx, k)
if idx < 0 { if idx < 0 {
idx = s.create(parentIdx, k, tableKind, false, false) idx = s.create(parentIdx, k, tableKind, false)
} else {
entry := s.entries[idx]
if entry.kind == valueKind {
return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
}
} }
parentIdx = idx parentIdx = idx
} }
@@ -259,7 +200,7 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
} }
s.clear(idx) s.clear(idx)
} else { } else {
idx = s.create(parentIdx, k, arrayTableKind, true, false) idx = s.create(parentIdx, k, arrayTableKind, true)
} }
s.currentIdx = idx s.currentIdx = idx
@@ -267,8 +208,7 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
return nil return nil
} }
func (s *SeenTracker) checkKeyValue(node *ast.Node) error { func (s *SeenTracker) checkKeyValue(parentIdx int, node *ast.Node) error {
parentIdx := s.currentIdx
it := node.Key() it := node.Key()
for it.Next() { for it.Next() {
@@ -277,7 +217,7 @@ func (s *SeenTracker) checkKeyValue(node *ast.Node) error {
idx := s.find(parentIdx, k) idx := s.find(parentIdx, k)
if idx < 0 { if idx < 0 {
idx = s.create(parentIdx, k, tableKind, false, true) idx = s.create(parentIdx, k, tableKind, false)
} else { } else {
entry := s.entries[idx] entry := s.entries[idx]
if it.IsLast() { if it.IsLast() {
@@ -298,59 +238,67 @@ func (s *SeenTracker) checkKeyValue(node *ast.Node) error {
switch value.Kind { switch value.Kind {
case ast.InlineTable: case ast.InlineTable:
return s.checkInlineTable(value) return s.checkInlineTable(parentIdx, value)
case ast.Array: case ast.Array:
return s.checkArray(value) return s.checkArray(parentIdx, value)
} }
return nil return nil
} }
func (s *SeenTracker) checkArray(node *ast.Node) error { func (s *SeenTracker) checkArray(parentIdx int, node *ast.Node) error {
set := false
it := node.Children() it := node.Children()
for it.Next() { for it.Next() {
if set {
s.clear(parentIdx)
}
n := it.Node() n := it.Node()
switch n.Kind { switch n.Kind {
case ast.InlineTable: case ast.InlineTable:
err := s.checkInlineTable(n) err := s.checkInlineTable(parentIdx, n)
if err != nil { if err != nil {
return err return err
} }
set = true
case ast.Array: case ast.Array:
err := s.checkArray(n) err := s.checkArray(parentIdx, n)
if err != nil { if err != nil {
return err return err
} }
set = true
} }
} }
return nil return nil
} }
func (s *SeenTracker) checkInlineTable(node *ast.Node) error { func (s *SeenTracker) checkInlineTable(parentIdx int, node *ast.Node) error {
if pool.New == nil {
pool.New = func() interface{} {
return &SeenTracker{}
}
}
s = pool.Get().(*SeenTracker)
s.reset()
it := node.Children() it := node.Children()
for it.Next() { for it.Next() {
n := it.Node() n := it.Node()
err := s.checkKeyValue(n) err := s.checkKeyValue(parentIdx, n)
if err != nil { if err != nil {
return err return err
} }
} }
// As inline tables are self-contained, the tracker does not
// need to retain the details of what they contain. The
// keyValue element that creates the inline table is kept to
// mark the presence of the inline table and prevent
// redefinition of its keys: check* functions cannot walk into
// a value.
pool.Put(s)
return nil return nil
} }
func (s *SeenTracker) id(idx int) int {
if idx >= 0 {
return s.entries[idx].id
}
return 0
}
func (s *SeenTracker) find(parentIdx int, k []byte) int {
parentID := s.id(parentIdx)
for i := parentIdx + 1; i < len(s.entries); i++ {
if s.entries[i].parent == parentID && bytes.Equal(s.entries[i].name, k) {
return i
}
}
return -1
}
-16
View File
@@ -1,16 +0,0 @@
package tracker
import (
"testing"
"unsafe"
"github.com/stretchr/testify/require"
)
func TestEntrySize(t *testing.T) {
// Validate no regression on the size of entry{}. This is a critical bit for
// performance of unmarshaling documents. Should only be increased with care
// and a very good reason.
maxExpectedEntrySize := 48
require.LessOrEqual(t, int(unsafe.Sizeof(entry{})), maxExpectedEntrySize)
}
+87 -317
View File
@@ -11,7 +11,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
) )
// Marshal serializes a Go value as a TOML document. // Marshal serializes a Go value as a TOML document.
@@ -54,7 +53,7 @@ func NewEncoder(w io.Writer) *Encoder {
// This behavior can be controlled on an individual struct field basis with the // This behavior can be controlled on an individual struct field basis with the
// inline tag: // inline tag:
// //
// MyField `toml:",inline"` // MyField `inline:"true"`
func (enc *Encoder) SetTablesInline(inline bool) *Encoder { func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
enc.tablesInline = inline enc.tablesInline = inline
return enc return enc
@@ -65,7 +64,7 @@ func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
// //
// This behavior can be controlled on an individual struct field basis with the multiline tag: // This behavior can be controlled on an individual struct field basis with the multiline tag:
// //
// MyField `multiline:"true"` // MyField `multiline:"true"`
func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder {
enc.arraysMultiline = multiline enc.arraysMultiline = multiline
return enc return enc
@@ -89,7 +88,7 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
// //
// If v cannot be represented to TOML it returns an error. // If v cannot be represented to TOML it returns an error.
// //
// # Encoding rules // Encoding rules
// //
// A top level slice containing only maps or structs is encoded as [[table // A top level slice containing only maps or structs is encoded as [[table
// array]]. // array]].
@@ -104,52 +103,27 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
// Intermediate tables are always printed. // Intermediate tables are always printed.
// //
// By default, strings are encoded as literal string, unless they contain either // By default, strings are encoded as literal string, unless they contain either
// a newline character or a single quote. In that case they are emitted as // a newline character or a single quote. In that case they are emitted as quoted
// quoted strings. // strings.
//
// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so
// results in an error. This rule exists because the TOML specification only
// requires parsers to support at least the 64 bits integer range. Allowing
// larger numbers would create non-standard TOML documents, which may not be
// readable (at best) by other implementations. To encode such numbers, a
// solution is a custom type that implements encoding.TextMarshaler.
// //
// When encoding structs, fields are encoded in order of definition, with their // When encoding structs, fields are encoded in order of definition, with their
// exact name. // exact name.
// //
// Tables and array tables are separated by empty lines. However, consecutive // Struct tags
// subtables definitions are not. For example:
// //
// [top1] // The following struct tags are available to tweak encoding on a per-field
// basis:
// //
// [top2] // toml:"foo"
// [top2.child1] // Changes the name of the key to use for the field to foo.
// //
// [[array]] // multiline:"true"
// When the field contains a string, it will be emitted as a quoted
// multi-line TOML string.
// //
// [[array]] // inline:"true"
// [array.child2] // When the field would normally be encoded as a table, it is instead
// // encoded as an inline table.
// # Struct tags
//
// The encoding of each public struct field can be customized by the format
// string in the "toml" key of the struct field's tag. This follows
// encoding/json's convention. The format string starts with the name of the
// field, optionally followed by a comma-separated list of options. The name may
// be empty in order to provide options without overriding the default name.
//
// The "multiline" option emits strings as quoted multi-line TOML strings. It
// has no effect on fields that would not be encoded as strings.
//
// The "inline" option turns fields that would be emitted as tables into inline
// tables instead. It has no effect on other fields.
//
// The "omitempty" option prevents empty values or groups from being emitted.
//
// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit
// a TOML comment before the value being annotated. Comments are ignored inside
// inline tables. For array tables, the comment is only present before the first
// element of the array.
func (enc *Encoder) Encode(v interface{}) error { func (enc *Encoder) Encode(v interface{}) error {
var ( var (
b []byte b []byte
@@ -177,8 +151,6 @@ func (enc *Encoder) Encode(v interface{}) error {
type valueOptions struct { type valueOptions struct {
multiline bool multiline bool
omitempty bool
comment string
} }
type encoderCtx struct { type encoderCtx struct {
@@ -228,29 +200,16 @@ func (ctx *encoderCtx) isRoot() bool {
return len(ctx.parentKey) == 0 && !ctx.hasKey return len(ctx.parentKey) == 0 && !ctx.hasKey
} }
//nolint:cyclop,funlen
func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
i := v.Interface() if !v.IsZero() {
i, ok := v.Interface().(time.Time)
switch x := i.(type) { if ok {
case time.Time: return i.AppendFormat(b, time.RFC3339), nil
if x.Nanosecond() > 0 {
return x.AppendFormat(b, time.RFC3339Nano), nil
} }
return x.AppendFormat(b, time.RFC3339), nil
case LocalTime:
return append(b, x.String()...), nil
case LocalDate:
return append(b, x.String()...), nil
case LocalDateTime:
return append(b, x.String()...), nil
} }
hasTextMarshaler := v.Type().Implements(textMarshalerType) if v.Type().Implements(textMarshalerType) {
if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) {
if !hasTextMarshaler {
v = v.Addr()
}
if ctx.isRoot() { if ctx.isRoot() {
return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type())
} }
@@ -290,31 +249,16 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e
case reflect.String: case reflect.String:
b = enc.encodeString(b, v.String(), ctx.options) b = enc.encodeString(b, v.String(), ctx.options)
case reflect.Float32: case reflect.Float32:
f := v.Float() if math.Trunc(v.Float()) == v.Float() {
b = strconv.AppendFloat(b, v.Float(), 'f', 1, 32)
if math.IsNaN(f) {
b = append(b, "nan"...)
} else if f > math.MaxFloat32 {
b = append(b, "inf"...)
} else if f < -math.MaxFloat32 {
b = append(b, "-inf"...)
} else if math.Trunc(f) == f {
b = strconv.AppendFloat(b, f, 'f', 1, 32)
} else { } else {
b = strconv.AppendFloat(b, f, 'f', -1, 32) b = strconv.AppendFloat(b, v.Float(), 'f', -1, 32)
} }
case reflect.Float64: case reflect.Float64:
f := v.Float() if math.Trunc(v.Float()) == v.Float() {
if math.IsNaN(f) { b = strconv.AppendFloat(b, v.Float(), 'f', 1, 64)
b = append(b, "nan"...)
} else if f > math.MaxFloat64 {
b = append(b, "inf"...)
} else if f < -math.MaxFloat64 {
b = append(b, "-inf"...)
} else if math.Trunc(f) == f {
b = strconv.AppendFloat(b, f, 'f', 1, 64)
} else { } else {
b = strconv.AppendFloat(b, f, 'f', -1, 64) b = strconv.AppendFloat(b, v.Float(), 'f', -1, 64)
} }
case reflect.Bool: case reflect.Bool:
if v.Bool() { if v.Bool() {
@@ -323,11 +267,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e
b = append(b, "false"...) b = append(b, "false"...)
} }
case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
x := v.Uint() b = strconv.AppendUint(b, v.Uint(), 10)
if x > uint64(math.MaxInt64) {
return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64))
}
b = strconv.AppendUint(b, x, 10)
case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int:
b = strconv.AppendInt(b, v.Int(), 10) b = strconv.AppendInt(b, v.Int(), 10)
default: default:
@@ -346,19 +286,19 @@ func isNil(v reflect.Value) bool {
} }
} }
func shouldOmitEmpty(options valueOptions, v reflect.Value) bool {
return options.omitempty && isEmptyValue(v)
}
func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) {
var err error var err error
if !ctx.inline { if !ctx.hasKey {
b = enc.encodeComment(ctx.indent, options.comment, b) panic("caller of encodeKv should have set the key in the context")
}
b = enc.indent(ctx.indent, b)
b, err = enc.encodeKey(b, ctx.key)
if err != nil {
return nil, err
} }
b = enc.indent(ctx.indent, b)
b = enc.encodeKey(b, ctx.key)
b = append(b, " = "...) b = append(b, " = "...)
// create a copy of the context because the value of a KV shouldn't // create a copy of the context because the value of a KV shouldn't
@@ -376,54 +316,6 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r
return b, nil return b, nil
} }
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Struct:
return isEmptyStruct(v)
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func isEmptyStruct(v reflect.Value) bool {
// TODO: merge with walkStruct and cache.
typ := v.Type()
for i := 0; i < typ.NumField(); i++ {
fieldType := typ.Field(i)
// only consider exported fields
if fieldType.PkgPath != "" {
continue
}
tag := fieldType.Tag.Get("toml")
// special field name to skip field
if tag == "-" {
continue
}
f := v.Field(i)
if !isEmptyValue(f) {
return false
}
}
return true
}
const literalQuote = '\'' const literalQuote = '\''
func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte {
@@ -435,13 +327,7 @@ func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byt
} }
func needsQuoting(v string) bool { func needsQuoting(v string) bool {
// TODO: vectorize return strings.ContainsAny(v, "'\b\f\n\r\t")
for _, b := range []byte(v) {
if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) {
return true
}
}
return false
} }
// caller should have checked that the string does not contain new lines or ' . // caller should have checked that the string does not contain new lines or ' .
@@ -453,6 +339,7 @@ func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte {
return b return b
} }
//nolint:cyclop
func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte {
stringQuote := `"` stringQuote := `"`
@@ -512,7 +399,7 @@ func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byt
return b return b
} }
// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . // called should have checked that the string is in A-Z / a-z / 0-9 / - / _ .
func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte {
return append(b, v...) return append(b, v...)
} }
@@ -522,17 +409,24 @@ func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error)
return b, nil return b, nil
} }
b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
b = enc.indent(ctx.indent, b) b = enc.indent(ctx.indent, b)
b = append(b, '[') b = append(b, '[')
b = enc.encodeKey(b, ctx.parentKey[0]) var err error
b, err = enc.encodeKey(b, ctx.parentKey[0])
if err != nil {
return nil, err
}
for _, k := range ctx.parentKey[1:] { for _, k := range ctx.parentKey[1:] {
b = append(b, '.') b = append(b, '.')
b = enc.encodeKey(b, k)
b, err = enc.encodeKey(b, k)
if err != nil {
return nil, err
}
} }
b = append(b, "]\n"...) b = append(b, "]\n"...)
@@ -541,19 +435,19 @@ func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error)
} }
//nolint:cyclop //nolint:cyclop
func (enc *Encoder) encodeKey(b []byte, k string) []byte { func (enc *Encoder) encodeKey(b []byte, k string) ([]byte, error) {
needsQuotation := false needsQuotation := false
cannotUseLiteral := false cannotUseLiteral := false
if len(k) == 0 {
return append(b, "''"...)
}
for _, c := range k { for _, c := range k {
if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' {
continue continue
} }
if c == '\n' {
return nil, fmt.Errorf("toml: new line characters in keys are not supported")
}
if c == literalQuote { if c == literalQuote {
cannotUseLiteral = true cannotUseLiteral = true
} }
@@ -561,17 +455,13 @@ func (enc *Encoder) encodeKey(b []byte, k string) []byte {
needsQuotation = true needsQuotation = true
} }
if needsQuotation && needsQuoting(k) {
cannotUseLiteral = true
}
switch { switch {
case cannotUseLiteral: case cannotUseLiteral:
return enc.encodeQuotedString(false, b, k) return enc.encodeQuotedString(false, b, k), nil
case needsQuotation: case needsQuotation:
return enc.encodeLiteralString(b, k) return enc.encodeLiteralString(b, k), nil
default: default:
return enc.encodeUnquotedKey(b, k) return enc.encodeUnquotedKey(b, k), nil
} }
} }
@@ -625,26 +515,18 @@ type table struct {
} }
func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { func (t *table) pushKV(k string, v reflect.Value, options valueOptions) {
for _, e := range t.kvs {
if e.Key == k {
return
}
}
t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options})
} }
func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { func (t *table) pushTable(k string, v reflect.Value, options valueOptions) {
for _, e := range t.tables {
if e.Key == k {
return
}
}
t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) t.tables = append(t.tables, entry{Key: k, Value: v, Options: options})
} }
func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
// TODO: cache this var t table
//nolint:godox
// TODO: cache this?
typ := v.Type() typ := v.Type()
for i := 0; i < typ.NumField(); i++ { for i := 0; i < typ.NumField(); i++ {
fieldType := typ.Field(i) fieldType := typ.Field(i)
@@ -654,130 +536,45 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
continue continue
} }
tag := fieldType.Tag.Get("toml") k, ok := fieldType.Tag.Lookup("toml")
if !ok {
k = fieldType.Name
}
// special field name to skip field // special field name to skip field
if tag == "-" { if k == "-" {
continue continue
} }
k, opts := parseTag(tag)
if !isValidName(k) {
k = ""
}
f := v.Field(i) f := v.Field(i)
if k == "" {
if fieldType.Anonymous {
if fieldType.Type.Kind() == reflect.Struct {
walkStruct(ctx, t, f)
}
continue
} else {
k = fieldType.Name
}
}
if isNil(f) { if isNil(f) {
continue continue
} }
options := valueOptions{ options := valueOptions{
multiline: opts.multiline, multiline: fieldBoolTag(fieldType, "multiline"),
omitempty: opts.omitempty,
comment: fieldType.Tag.Get("comment"),
} }
if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { inline := fieldBoolTag(fieldType, "inline")
if inline || !willConvertToTableOrArrayTable(ctx, f) {
t.pushKV(k, f, options) t.pushKV(k, f, options)
} else { } else {
t.pushTable(k, f, options) t.pushTable(k, f, options)
} }
} }
}
func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
var t table
walkStruct(ctx, &t, v)
return enc.encodeTable(b, ctx, t) return enc.encodeTable(b, ctx, t)
} }
func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { func fieldBoolTag(field reflect.StructField, tag string) bool {
for len(comment) > 0 { x, ok := field.Tag.Lookup(tag)
var line string
idx := strings.IndexByte(comment, '\n') return ok && x == "true"
if idx >= 0 {
line = comment[:idx]
comment = comment[idx+1:]
} else {
line = comment
comment = ""
}
b = enc.indent(indent, b)
b = append(b, "# "...)
b = append(b, line...)
b = append(b, '\n')
}
return b
}
func isValidName(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
case !unicode.IsLetter(c) && !unicode.IsDigit(c):
return false
}
}
return true
}
type tagOptions struct {
multiline bool
inline bool
omitempty bool
}
func parseTag(tag string) (string, tagOptions) {
opts := tagOptions{}
idx := strings.Index(tag, ",")
if idx == -1 {
return tag, opts
}
raw := tag[idx+1:]
tag = string(tag[:idx])
for raw != "" {
var o string
i := strings.Index(raw, ",")
if i >= 0 {
o, raw = raw[:i], raw[i+1:]
} else {
o, raw = raw, ""
}
switch o {
case "multiline":
opts.multiline = true
case "inline":
opts.inline = true
case "omitempty":
opts.omitempty = true
}
}
return tag, opts
} }
//nolint:cyclop
func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) {
var err error var err error
@@ -799,13 +596,7 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
} }
ctx.skipTableHeader = false ctx.skipTableHeader = false
hasNonEmptyKV := false
for _, kv := range t.kvs { for _, kv := range t.kvs {
if shouldOmitEmpty(kv.Options, kv.Value) {
continue
}
hasNonEmptyKV = true
ctx.setKey(kv.Key) ctx.setKey(kv.Key)
b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value)
@@ -816,20 +607,7 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
b = append(b, '\n') b = append(b, '\n')
} }
first := true
for _, table := range t.tables { for _, table := range t.tables {
if shouldOmitEmpty(table.Options, table.Value) {
continue
}
if first {
first = false
if hasNonEmptyKV {
b = append(b, '\n')
}
} else {
b = append(b, "\n"...)
}
ctx.setKey(table.Key) ctx.setKey(table.Key)
ctx.options = table.Options ctx.options = table.Options
@@ -838,6 +616,8 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
if err != nil { if err != nil {
return nil, err return nil, err
} }
b = append(b, '\n')
} }
return b, nil return b, nil
@@ -850,10 +630,6 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte
first := true first := true
for _, kv := range t.kvs { for _, kv := range t.kvs {
if shouldOmitEmpty(kv.Options, kv.Value) {
continue
}
if first { if first {
first = false first = false
} else { } else {
@@ -869,7 +645,7 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte
} }
if len(t.tables) > 0 { if len(t.tables) > 0 {
panic("inline table cannot contain nested tables, only key-values") panic("inline table cannot contain nested tables, online key-values")
} }
b = append(b, "}"...) b = append(b, "}"...)
@@ -881,7 +657,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool {
if !v.IsValid() { if !v.IsValid() {
return false return false
} }
if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { if v.Type() == timeType || v.Type().Implements(textMarshalerType) {
return false return false
} }
@@ -903,9 +679,6 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool {
} }
func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool {
if ctx.insideKv {
return false
}
t := v.Type() t := v.Type()
if t.Kind() == reflect.Interface { if t.Kind() == reflect.Interface {
@@ -951,6 +724,7 @@ func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]by
func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
ctx.shiftKey() ctx.shiftKey()
var err error
scratch := make([]byte, 0, 64) scratch := make([]byte, 0, 64)
scratch = append(scratch, "[["...) scratch = append(scratch, "[["...)
@@ -959,22 +733,18 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.
scratch = append(scratch, '.') scratch = append(scratch, '.')
} }
scratch = enc.encodeKey(scratch, k) scratch, err = enc.encodeKey(scratch, k)
if err != nil {
return nil, err
}
} }
scratch = append(scratch, "]]\n"...) scratch = append(scratch, "]]\n"...)
ctx.skipTableHeader = true ctx.skipTableHeader = true
b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
for i := 0; i < v.Len(); i++ { for i := 0; i < v.Len(); i++ {
if i != 0 {
b = append(b, "\n"...)
}
b = append(b, scratch...) b = append(b, scratch...)
var err error
b, err = enc.encode(b, ctx, v.Index(i)) b, err = enc.encode(b, ctx, v.Index(i))
if err != nil { if err != nil {
return nil, err return nil, err
+105 -474
View File
@@ -4,28 +4,20 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"math"
"math/big"
"strings" "strings"
"testing" "testing"
"time"
"github.com/pelletier/go-toml/v2" "github.com/pelletier/go-toml/v2"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
//nolint:funlen
func TestMarshal(t *testing.T) { func TestMarshal(t *testing.T) {
someInt := 42 someInt := 42
type structInline struct { type structInline struct {
A interface{} `toml:",inline"` A interface{} `inline:"true"`
}
type comments struct {
One int
Two int `comment:"Before kv"`
Three []int `comment:"Before array"`
} }
examples := []struct { examples := []struct {
@@ -39,21 +31,21 @@ func TestMarshal(t *testing.T) {
v: map[string]string{ v: map[string]string{
"hello": "world", "hello": "world",
}, },
expected: "hello = 'world'\n", expected: "hello = 'world'",
}, },
{ {
desc: "map with new line in key", desc: "map with new line in key",
v: map[string]string{ v: map[string]string{
"hel\nlo": "world", "hel\nlo": "world",
}, },
expected: "\"hel\\nlo\" = 'world'\n", err: true,
}, },
{ {
desc: `map with " in key`, desc: `map with " in key`,
v: map[string]string{ v: map[string]string{
`hel"lo`: "world", `hel"lo`: "world",
}, },
expected: "'hel\"lo' = 'world'\n", expected: `'hel"lo' = 'world'`,
}, },
{ {
desc: "map in map and string", desc: "map in map and string",
@@ -62,9 +54,9 @@ func TestMarshal(t *testing.T) {
"hello": "world", "hello": "world",
}, },
}, },
expected: `[table] expected: `
hello = 'world' [table]
`, hello = 'world'`,
}, },
{ {
desc: "map in map in map and string", desc: "map in map in map and string",
@@ -75,10 +67,10 @@ hello = 'world'
}, },
}, },
}, },
expected: `[this] expected: `
[this]
[this.is] [this.is]
a = 'test' a = 'test'`,
`,
}, },
{ {
desc: "map in map in map and string with values", desc: "map in map in map and string with values",
@@ -90,20 +82,18 @@ a = 'test'
"also": "that", "also": "that",
}, },
}, },
expected: `[this] expected: `
[this]
also = 'that' also = 'that'
[this.is] [this.is]
a = 'test' a = 'test'`,
`,
}, },
{ {
desc: "simple string array", desc: "simple string array",
v: map[string][]string{ v: map[string][]string{
"array": {"one", "two", "three"}, "array": {"one", "two", "three"},
}, },
expected: `array = ['one', 'two', 'three'] expected: `array = ['one', 'two', 'three']`,
`,
}, },
{ {
desc: "empty string array", desc: "empty string array",
@@ -120,16 +110,14 @@ a = 'test'
v: map[string][][]string{ v: map[string][][]string{
"array": {{"one", "two"}, {"three"}}, "array": {{"one", "two"}, {"three"}},
}, },
expected: `array = [['one', 'two'], ['three']] expected: `array = [['one', 'two'], ['three']]`,
`,
}, },
{ {
desc: "mixed strings and nested string arrays", desc: "mixed strings and nested string arrays",
v: map[string][]interface{}{ v: map[string][]interface{}{
"array": {"a string", []string{"one", "two"}, "last"}, "array": {"a string", []string{"one", "two"}, "last"},
}, },
expected: `array = ['a string', ['one', 'two'], 'last'] expected: `array = ['a string', ['one', 'two'], 'last']`,
`,
}, },
{ {
desc: "array of maps", desc: "array of maps",
@@ -139,9 +127,9 @@ a = 'test'
{"map2.1": "v2.1"}, {"map2.1": "v2.1"},
}, },
}, },
expected: `[[top]] expected: `
[[top]]
'map1.1' = 'v1.1' 'map1.1' = 'v1.1'
[[top]] [[top]]
'map2.1' = 'v2.1' 'map2.1' = 'v2.1'
`, `,
@@ -152,9 +140,9 @@ a = 'test'
"key1": "value1", "key1": "value1",
"key2": "value2", "key2": "value2",
}, },
expected: `key1 = 'value1' expected: `
key2 = 'value2' key1 = 'value1'
`, key2 = 'value2'`,
}, },
{ {
desc: "simple struct", desc: "simple struct",
@@ -163,8 +151,7 @@ key2 = 'value2'
}{ }{
A: "foo", A: "foo",
}, },
expected: `A = 'foo' expected: `A = 'foo'`,
`,
}, },
{ {
desc: "one level of structs within structs", desc: "one level of structs within structs",
@@ -179,7 +166,8 @@ key2 = 'value2'
K2: "v2", K2: "v2",
}, },
}, },
expected: `[A] expected: `
[A]
K1 = 'v1' K1 = 'v1'
K2 = 'v2' K2 = 'v2'
`, `,
@@ -194,10 +182,10 @@ K2 = 'v2'
}, },
}, },
}, },
expected: `[root] expected: `
[root]
[[root.nested]] [[root.nested]]
name = 'Bob' name = 'Bob'
[[root.nested]] [[root.nested]]
name = 'Alice' name = 'Alice'
`, `,
@@ -205,72 +193,67 @@ name = 'Alice'
{ {
desc: "string escapes", desc: "string escapes",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'\b\f\r\t\"\\", "a": `'"\`,
}, },
expected: `a = "'\b\f\r\t\"\\" expected: `a = "'\"\\"`,
`,
}, },
{ {
desc: "string utf8 low", desc: "string utf8 low",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'Ę", "a": "'Ę",
}, },
expected: `a = "'Ę" expected: `a = "'Ę"`,
`,
}, },
{ {
desc: "string utf8 low 2", desc: "string utf8 low 2",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'\u10A85", "a": "'\u10A85",
}, },
expected: "a = \"'\u10A85\"\n", expected: "a = \"'\u10A85\"",
}, },
{ {
desc: "string utf8 low 2", desc: "string utf8 low 2",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'\u10A85", "a": "'\u10A85",
}, },
expected: "a = \"'\u10A85\"\n", expected: "a = \"'\u10A85\"",
}, },
{ {
desc: "emoji", desc: "emoji",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'😀", "a": "'😀",
}, },
expected: "a = \"'😀\"\n", expected: "a = \"'😀\"",
}, },
{ {
desc: "control char", desc: "control char",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "'\u001A", "a": "'\u001A",
}, },
expected: `a = "'\u001A" expected: `a = "'\u001A"`,
`,
}, },
{ {
desc: "multi-line string", desc: "multi-line string",
v: map[string]interface{}{ v: map[string]interface{}{
"a": "hello\nworld", "a": "hello\nworld",
}, },
expected: `a = "hello\nworld" expected: `a = "hello\nworld"`,
`,
}, },
{ {
desc: "multi-line forced", desc: "multi-line forced",
v: struct { v: struct {
A string `toml:",multiline"` A string `multiline:"true"`
}{ }{
A: "hello\nworld", A: "hello\nworld",
}, },
expected: `A = """ expected: `A = """
hello hello
world""" world"""`,
`,
}, },
{ {
desc: "inline field", desc: "inline field",
v: struct { v: struct {
A map[string]string `toml:",inline"` A map[string]string `inline:"true"`
B map[string]string B map[string]string
}{ }{
A: map[string]string{ A: map[string]string{
@@ -280,8 +263,8 @@ world"""
"isinline": "no", "isinline": "no",
}, },
}, },
expected: `A = {isinline = 'yes'} expected: `
A = {isinline = 'yes'}
[B] [B]
isinline = 'no' isinline = 'no'
`, `,
@@ -289,13 +272,14 @@ isinline = 'no'
{ {
desc: "mutiline array int", desc: "mutiline array int",
v: struct { v: struct {
A []int `toml:",multiline"` A []int `multiline:"true"`
B []int B []int
}{ }{
A: []int{1, 2, 3, 4}, A: []int{1, 2, 3, 4},
B: []int{1, 2, 3, 4}, B: []int{1, 2, 3, 4},
}, },
expected: `A = [ expected: `
A = [
1, 1,
2, 2,
3, 3,
@@ -307,11 +291,12 @@ B = [1, 2, 3, 4]
{ {
desc: "mutiline array in array", desc: "mutiline array in array",
v: struct { v: struct {
A [][]int `toml:",multiline"` A [][]int `multiline:"true"`
}{ }{
A: [][]int{{1, 2}, {3, 4}}, A: [][]int{{1, 2}, {3, 4}},
}, },
expected: `A = [ expected: `
A = [
[1, 2], [1, 2],
[3, 4] [3, 4]
] ]
@@ -336,8 +321,7 @@ B = [1, 2, 3, 4]
}{ }{
A: []*int{nil}, A: []*int{nil},
}, },
expected: `A = [0] expected: `A = [0]`,
`,
}, },
{ {
desc: "nil pointer in slice uses zero value", desc: "nil pointer in slice uses zero value",
@@ -346,8 +330,7 @@ B = [1, 2, 3, 4]
}{ }{
A: []*int{nil}, A: []*int{nil},
}, },
expected: `A = [0] expected: `A = [0]`,
`,
}, },
{ {
desc: "pointer in slice", desc: "pointer in slice",
@@ -356,8 +339,7 @@ B = [1, 2, 3, 4]
}{ }{
A: []*int{&someInt}, A: []*int{&someInt},
}, },
expected: `A = [42] expected: `A = [42]`,
`,
}, },
{ {
desc: "inline table in inline table", desc: "inline table in inline table",
@@ -368,34 +350,30 @@ B = [1, 2, 3, 4]
}, },
}, },
}, },
expected: `A = {A = {A = 'hello'}} expected: `A = {A = {A = 'hello'}}`,
`,
}, },
{ {
desc: "empty slice in map", desc: "empty slice in map",
v: map[string][]string{ v: map[string][]string{
"a": {}, "a": {},
}, },
expected: `a = [] expected: `a = []`,
`,
}, },
{ {
desc: "map in slice", desc: "map in slice",
v: map[string][]map[string]string{ v: map[string][]map[string]string{
"a": {{"hello": "world"}}, "a": {{"hello": "world"}},
}, },
expected: `[[a]] expected: `
hello = 'world' [[a]]
`, hello = 'world'`,
}, },
{ {
desc: "newline in map in slice", desc: "newline in map in slice",
v: map[string][]map[string]string{ v: map[string][]map[string]string{
"a\n": {{"hello": "world"}}, "a\n": {{"hello": "world"}},
}, },
expected: `[["a\n"]] err: true,
hello = 'world'
`,
}, },
{ {
desc: "newline in map in slice", desc: "newline in map in slice",
@@ -411,8 +389,7 @@ hello = 'world'
}{ }{
A: []struct{}{}, A: []struct{}{},
}, },
expected: `A = [] expected: `A = []`,
`,
}, },
{ {
desc: "nil field is ignored", desc: "nil field is ignored",
@@ -432,8 +409,7 @@ hello = 'world'
Public: "shown", Public: "shown",
private: "hidden", private: "hidden",
}, },
expected: `Public = 'shown' expected: `Public = 'shown'`,
`,
}, },
{ {
desc: "fields tagged - are ignored", desc: "fields tagged - are ignored",
@@ -457,8 +433,7 @@ hello = 'world'
v: map[string]interface{}{ v: map[string]interface{}{
"hello\nworld": 42, "hello\nworld": 42,
}, },
expected: `"hello\nworld" = 42 err: true,
`,
}, },
{ {
desc: "new line in parent of nested table key", desc: "new line in parent of nested table key",
@@ -467,9 +442,7 @@ hello = 'world'
"inner": 42, "inner": 42,
}, },
}, },
expected: `["hello\nworld"] err: true,
inner = 42
`,
}, },
{ {
desc: "new line in nested table key", desc: "new line in nested table key",
@@ -480,10 +453,7 @@ inner = 42
}, },
}, },
}, },
expected: `[parent] err: true,
[parent."in\ner"]
foo = 42
`,
}, },
{ {
desc: "invalid map key", desc: "invalid map key",
@@ -499,39 +469,6 @@ foo = 42
}, },
err: true, err: true,
}, },
{
desc: "time",
v: struct {
T time.Time
}{
T: time.Time{},
},
expected: `T = 0001-01-01T00:00:00Z
`,
},
{
desc: "time nano",
v: struct {
T time.Time
}{
T: time.Date(1979, time.May, 27, 0, 32, 0, 999999000, time.UTC),
},
expected: `T = 1979-05-27T00:32:00.999999Z
`,
},
{
desc: "bool",
v: struct {
A bool
B bool
}{
A: false,
B: true,
},
expected: `A = false
B = true
`,
},
{ {
desc: "numbers", desc: "numbers",
v: struct { v: struct {
@@ -546,7 +483,6 @@ B = true
I int16 I int16
J int8 J int8
K int K int
L float64
}{ }{
A: 1.1, A: 1.1,
B: 42, B: 42,
@@ -559,9 +495,9 @@ B = true
I: 42, I: 42,
J: 42, J: 42,
K: 42, K: 42,
L: 2.2,
}, },
expected: `A = 1.1 expected: `
A = 1.1
B = 42 B = 42
C = 42 C = 42
D = 42 D = 42
@@ -571,29 +507,7 @@ G = 42
H = 42 H = 42
I = 42 I = 42
J = 42 J = 42
K = 42 K = 42`,
L = 2.2
`,
},
{
desc: "comments",
v: struct {
Table comments `comment:"Before table"`
}{
Table: comments{
One: 1,
Two: 2,
Three: []int{1, 2, 3},
},
},
expected: `# Before table
[Table]
One = 1
# Before kv
Two = 2
# Before array
Three = [1, 2, 3]
`,
}, },
} }
@@ -608,7 +522,7 @@ Three = [1, 2, 3]
} }
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, e.expected, string(b)) equalStringsIgnoreNewlines(t, e.expected, string(b))
// make sure the output is always valid TOML // make sure the output is always valid TOML
defaultMap := map[string]interface{}{} defaultMap := map[string]interface{}{}
@@ -683,31 +597,10 @@ func testWithFlags(t *testing.T, flags int, setters flagsSetters, testfn func(t
} }
} }
func TestMarshalFloats(t *testing.T) { func equalStringsIgnoreNewlines(t *testing.T, expected string, actual string) {
v := map[string]float32{ t.Helper()
"nan": float32(math.NaN()), cutset := "\n"
"+inf": float32(math.Inf(1)), assert.Equal(t, strings.Trim(expected, cutset), strings.Trim(actual, cutset))
"-inf": float32(math.Inf(-1)),
}
expected := `'+inf' = inf
-inf = -inf
nan = nan
`
actual, err := toml.Marshal(v)
require.NoError(t, err)
require.Equal(t, expected, string(actual))
v64 := map[string]float64{
"nan": math.NaN(),
"+inf": math.Inf(1),
"-inf": math.Inf(-1),
}
actual, err = toml.Marshal(v64)
require.NoError(t, err)
require.Equal(t, expected, string(actual))
} }
//nolint:funlen //nolint:funlen
@@ -722,8 +615,7 @@ func TestMarshalIndentTables(t *testing.T) {
v: map[string]interface{}{ v: map[string]interface{}{
"foo": "bar", "foo": "bar",
}, },
expected: `foo = 'bar' expected: `foo = 'bar'`,
`,
}, },
{ {
desc: "one level table", desc: "one level table",
@@ -733,7 +625,8 @@ func TestMarshalIndentTables(t *testing.T) {
"two": "value2", "two": "value2",
}, },
}, },
expected: `[foo] expected: `
[foo]
one = 'value1' one = 'value1'
two = 'value2' two = 'value2'
`, `,
@@ -749,11 +642,10 @@ func TestMarshalIndentTables(t *testing.T) {
}, },
}, },
}, },
expected: `root = 'value0' expected: `
root = 'value0'
[level1] [level1]
one = 'value1' one = 'value1'
[level1.level2] [level1.level2]
two = 'value2' two = 'value2'
`, `,
@@ -768,7 +660,7 @@ func TestMarshalIndentTables(t *testing.T) {
enc.SetIndentTables(true) enc.SetIndentTables(true)
err := enc.Encode(e.v) err := enc.Encode(e.v)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, e.expected, buf.String()) equalStringsIgnoreNewlines(t, e.expected, buf.String())
}) })
} }
} }
@@ -813,7 +705,7 @@ func TestMarshalTextMarshaler(t *testing.T) {
m := map[string]interface{}{"a": &customTextMarshaler{value: 2}} m := map[string]interface{}{"a": &customTextMarshaler{value: 2}}
r, err := toml.Marshal(m) r, err := toml.Marshal(m)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "a = '::2'\n", string(r)) equalStringsIgnoreNewlines(t, "a = '::2'", string(r))
} }
type brokenWriter struct{} type brokenWriter struct{}
@@ -836,63 +728,10 @@ func TestEncoderSetIndentSymbol(t *testing.T) {
enc.SetIndentSymbol(">>>") enc.SetIndentSymbol(">>>")
err := enc.Encode(map[string]map[string]string{"parent": {"hello": "world"}}) err := enc.Encode(map[string]map[string]string{"parent": {"hello": "world"}})
require.NoError(t, err) require.NoError(t, err)
expected := `[parent] expected := `
>>>hello = 'world' [parent]
` >>>hello = 'world'`
assert.Equal(t, expected, w.String()) equalStringsIgnoreNewlines(t, expected, w.String())
}
func TestEncoderOmitempty(t *testing.T) {
type doc struct {
String string `toml:",omitempty,multiline"`
Bool bool `toml:",omitempty,multiline"`
Int int `toml:",omitempty,multiline"`
Int8 int8 `toml:",omitempty,multiline"`
Int16 int16 `toml:",omitempty,multiline"`
Int32 int32 `toml:",omitempty,multiline"`
Int64 int64 `toml:",omitempty,multiline"`
Uint uint `toml:",omitempty,multiline"`
Uint8 uint8 `toml:",omitempty,multiline"`
Uint16 uint16 `toml:",omitempty,multiline"`
Uint32 uint32 `toml:",omitempty,multiline"`
Uint64 uint64 `toml:",omitempty,multiline"`
Float32 float32 `toml:",omitempty,multiline"`
Float64 float64 `toml:",omitempty,multiline"`
MapNil map[string]string `toml:",omitempty,multiline"`
Slice []string `toml:",omitempty,multiline"`
Ptr *string `toml:",omitempty,multiline"`
Iface interface{} `toml:",omitempty,multiline"`
Struct struct{} `toml:",omitempty,multiline"`
}
d := doc{}
b, err := toml.Marshal(d)
require.NoError(t, err)
expected := ``
assert.Equal(t, expected, string(b))
}
func TestEncoderTagFieldName(t *testing.T) {
type doc struct {
String string `toml:"hello"`
OkSym string `toml:"#"`
Bad string `toml:"\"`
}
d := doc{String: "world"}
b, err := toml.Marshal(d)
require.NoError(t, err)
expected := `hello = 'world'
'#' = ''
Bad = ''
`
assert.Equal(t, expected, string(b))
} }
func TestIssue436(t *testing.T) { func TestIssue436(t *testing.T) {
@@ -906,11 +745,12 @@ func TestIssue436(t *testing.T) {
err = toml.NewEncoder(&buf).Encode(v) err = toml.NewEncoder(&buf).Encode(v)
require.NoError(t, err) require.NoError(t, err)
expected := `[[a]] expected := `
[[a]]
[a.b] [a.b]
c = 'd' c = 'd'
` `
assert.Equal(t, expected, buf.String()) equalStringsIgnoreNewlines(t, expected, buf.String())
} }
func TestIssue424(t *testing.T) { func TestIssue424(t *testing.T) {
@@ -958,238 +798,6 @@ func TestIssue590(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
func TestIssue571(t *testing.T) {
type Foo struct {
Float32 float32
Float64 float64
}
const closeEnough = 1e-9
foo := Foo{
Float32: 42,
Float64: 43,
}
b, err := toml.Marshal(foo)
require.NoError(t, err)
var foo2 Foo
err = toml.Unmarshal(b, &foo2)
require.NoError(t, err)
assert.InDelta(t, 42, foo2.Float32, closeEnough)
assert.InDelta(t, 43, foo2.Float64, closeEnough)
}
func TestIssue678(t *testing.T) {
type Config struct {
BigInt big.Int
}
cfg := &Config{
BigInt: *big.NewInt(123),
}
out, err := toml.Marshal(cfg)
require.NoError(t, err)
assert.Equal(t, "BigInt = '123'\n", string(out))
cfg2 := &Config{}
err = toml.Unmarshal(out, cfg2)
require.NoError(t, err)
require.Equal(t, cfg, cfg2)
}
func TestIssue752(t *testing.T) {
type Fooer interface {
Foo() string
}
type Container struct {
Fooer
}
c := Container{}
out, err := toml.Marshal(c)
require.NoError(t, err)
require.Equal(t, "", string(out))
}
func TestIssue768(t *testing.T) {
type cfg struct {
Name string `comment:"This is a multiline comment.\nThis is line 2."`
}
out, err := toml.Marshal(&cfg{})
require.NoError(t, err)
expected := `# This is a multiline comment.
# This is line 2.
Name = ''
`
require.Equal(t, expected, string(out))
}
func TestIssue786(t *testing.T) {
type Dependencies struct {
Dependencies []string `toml:"dependencies,multiline,omitempty"`
BuildDependencies []string `toml:"buildDependencies,multiline,omitempty"`
OptionalDependencies []string `toml:"optionalDependencies,multiline,omitempty"`
}
type Test struct {
Dependencies Dependencies `toml:"dependencies,omitempty"`
}
x := Test{}
b, err := toml.Marshal(x)
require.NoError(t, err)
require.Equal(t, "", string(b))
type General struct {
From string `toml:"from,omitempty" json:"from,omitempty" comment:"from in graphite-web format, the local TZ is used"`
Randomize bool `toml:"randomize" json:"randomize" comment:"randomize starting time with [0,step)"`
}
type Custom struct {
Name string `toml:"name" json:"name,omitempty" comment:"names for generator, braces are expanded like in shell"`
Type string `toml:"type,omitempty" json:"type,omitempty" comment:"type of generator"`
General
}
type Config struct {
General
Custom []Custom `toml:"custom,omitempty" json:"custom,omitempty" comment:"generators with custom parameters can be specified separately"`
}
buf := new(bytes.Buffer)
config := &Config{General: General{From: "-2d", Randomize: true}}
config.Custom = []Custom{{Name: "omit", General: General{Randomize: false}}}
config.Custom = append(config.Custom, Custom{Name: "present", General: General{From: "-2d", Randomize: true}})
encoder := toml.NewEncoder(buf)
encoder.Encode(config)
expected := `# from in graphite-web format, the local TZ is used
from = '-2d'
# randomize starting time with [0,step)
randomize = true
# generators with custom parameters can be specified separately
[[custom]]
# names for generator, braces are expanded like in shell
name = 'omit'
# randomize starting time with [0,step)
randomize = false
[[custom]]
# names for generator, braces are expanded like in shell
name = 'present'
# from in graphite-web format, the local TZ is used
from = '-2d'
# randomize starting time with [0,step)
randomize = true
`
require.Equal(t, expected, buf.String())
}
func TestMarshalNestedAnonymousStructs(t *testing.T) {
type Embedded struct {
Value string `toml:"value" json:"value"`
Top struct {
Value string `toml:"value" json:"value"`
} `toml:"top" json:"top"`
}
type Named struct {
Value string `toml:"value" json:"value"`
}
var doc struct {
Embedded
Named `toml:"named" json:"named"`
Anonymous struct {
Value string `toml:"value" json:"value"`
} `toml:"anonymous" json:"anonymous"`
}
expected := `value = ''
[top]
value = ''
[named]
value = ''
[anonymous]
value = ''
`
result, err := toml.Marshal(doc)
require.NoError(t, err)
require.Equal(t, expected, string(result))
}
func TestMarshalNestedAnonymousStructs_DuplicateField(t *testing.T) {
type Embedded struct {
Value string `toml:"value" json:"value"`
Top struct {
Value string `toml:"value" json:"value"`
} `toml:"top" json:"top"`
}
var doc struct {
Value string `toml:"value" json:"value"`
Embedded
}
doc.Embedded.Value = "shadowed"
doc.Value = "shadows"
expected := `value = 'shadows'
[top]
value = ''
`
result, err := toml.Marshal(doc)
require.NoError(t, err)
require.NoError(t, err)
require.Equal(t, expected, string(result))
}
func TestLocalTime(t *testing.T) {
v := map[string]toml.LocalTime{
"a": {
Hour: 1,
Minute: 2,
Second: 3,
Nanosecond: 4,
},
}
expected := `a = 01:02:03.000000004
`
out, err := toml.Marshal(v)
require.NoError(t, err)
require.Equal(t, expected, string(out))
}
func TestMarshalUint64Overflow(t *testing.T) {
// The TOML spec only requires implementation to provide support for the
// int64 range. To avoid generating TOML documents that would not be
// supported by standard-compliant parsers, uint64 > max int64 cannot be
// marshaled.
x := map[string]interface{}{
"foo": uint64(math.MaxInt64) + 1,
}
_, err := toml.Marshal(x)
require.Error(t, err)
}
func ExampleMarshal() { func ExampleMarshal() {
type MyConfig struct { type MyConfig struct {
Version int Version int
@@ -1214,3 +822,26 @@ func ExampleMarshal() {
// Name = 'go-toml' // Name = 'go-toml'
// Tags = ['go', 'toml'] // Tags = ['go', 'toml']
} }
func TestIssue571(t *testing.T) {
type Foo struct {
Float32 float32
Float64 float64
}
const closeEnough = 1e-9
foo := Foo{
Float32: 42,
Float64: 43,
}
b, err := toml.Marshal(foo)
require.NoError(t, err)
var foo2 Foo
err = toml.Unmarshal(b, &foo2)
require.NoError(t, err)
assert.InDelta(t, 42, foo2.Float32, closeEnough)
assert.InDelta(t, 43, foo2.Float64, closeEnough)
}
+13 -12
View File
@@ -578,10 +578,6 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
switch token[i+j] { switch token[i+j] {
case ' ', '\t': case ' ', '\t':
continue continue
case '\r':
if token[i+j+1] == '\n' {
continue
}
case '\n': case '\n':
isLastNonWhitespaceOnLine = true isLastNonWhitespaceOnLine = true
} }
@@ -617,8 +613,6 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
builder.WriteByte('\r') builder.WriteByte('\r')
case 't': case 't':
builder.WriteByte('\t') builder.WriteByte('\t')
case 'e':
builder.WriteByte(0x1B)
case 'u': case 'u':
x, err := hexToRune(atmost(token[i+1:], 4), 4) x, err := hexToRune(atmost(token[i+1:], 4), 4)
if err != nil { if err != nil {
@@ -695,10 +689,6 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
} }
func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
if len(b) == 0 {
return nil, nil, nil, newDecodeError(b, "expected key but found none")
}
// simple-key = quoted-key / unquoted-key // simple-key = quoted-key / unquoted-key
// unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
// quoted-key = basic-string / literal-string // quoted-key = basic-string / literal-string
@@ -776,8 +766,6 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
builder.WriteByte('\r') builder.WriteByte('\r')
case 't': case 't':
builder.WriteByte('\t') builder.WriteByte('\t')
case 'e':
builder.WriteByte(0x1B)
case 'u': case 'u':
x, err := hexToRune(token[i+1:len(token)-1], 4) x, err := hexToRune(token[i+1:len(token)-1], 4)
if err != nil { if err != nil {
@@ -874,6 +862,7 @@ func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, err
return p.scanIntOrFloat(b) return p.scanIntOrFloat(b)
} }
//nolint:gomnd
if len(b) < 3 { if len(b) < 3 {
return p.scanIntOrFloat(b) return p.scanIntOrFloat(b)
} }
@@ -898,6 +887,18 @@ func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, err
return p.scanIntOrFloat(b) return p.scanIntOrFloat(b)
} }
func digitsToInt(b []byte) int {
x := 0
for _, d := range b {
x *= 10
x += int(d - '0')
}
return x
}
//nolint:gocognit,cyclop
func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) { func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) {
// scans for contiguous characters in [0-9T:Z.+-], and up to one space if // scans for contiguous characters in [0-9T:Z.+-], and up to one space if
// followed by a digit. // followed by a digit.
+19 -39
View File
@@ -53,7 +53,7 @@ func scanLiteralString(b []byte) ([]byte, []byte, error) {
switch b[i] { switch b[i] {
case '\'': case '\'':
return b[:i+1], b[i+1:], nil return b[:i+1], b[i+1:], nil
case '\n', '\r': case '\n':
return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines") return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines")
} }
size := utf8ValidNext(b[i:]) size := utf8ValidNext(b[i:])
@@ -76,42 +76,30 @@ func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
// mll-char = %x09 / %x20-26 / %x28-7E / non-ascii // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
// mll-quotes = 1*2apostrophe // mll-quotes = 1*2apostrophe
for i := 3; i < len(b); { for i := 3; i < len(b); {
switch b[i] { if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
case '\'': i += 3
if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
i += 3
// At that point we found 3 apostrophe, and i is the // At that point we found 3 apostrophe, and i is the
// index of the byte after the third one. The scanner // index of the byte after the third one. The scanner
// needs to be eager, because there can be an extra 2 // needs to be eager, because there can be an extra 2
// apostrophe that can be accepted at the end of the // apostrophe that can be accepted at the end of the
// string. // string.
if i >= len(b) || b[i] != '\'' {
return b[:i], b[i:], nil
}
i++
if i >= len(b) || b[i] != '\'' {
return b[:i], b[i:], nil
}
i++
if i < len(b) && b[i] == '\'' {
return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string")
}
if i >= len(b) || b[i] != '\'' {
return b[:i], b[i:], nil return b[:i], b[i:], nil
} }
case '\r': i++
if len(b) < i+2 {
return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`) if i >= len(b) || b[i] != '\'' {
return b[:i], b[i:], nil
} }
if b[i+1] != '\n' { i++
return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
if i < len(b) && b[i] == '\'' {
return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string")
} }
i += 2 // skip the \n
continue return b[:i], b[i:], nil
} }
size := utf8ValidNext(b[i:]) size := utf8ValidNext(b[i:])
if size == 0 { if size == 0 {
@@ -254,14 +242,6 @@ func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
} }
escaped = true escaped = true
i++ // skip the next character i++ // skip the next character
case '\r':
if len(b) < i+2 {
return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`)
}
if b[i+1] != '\n' {
return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
}
i++ // skip the \n
} }
} }
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=0000-01-01 00:00:00")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("\"\\n\"=\"\"")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("''=0")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=0000-01-01")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=\"\"\"\\U00000000\"\"\"")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=[[{}]]")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("\"\\b\"=\"\"")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=inf")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=0000-01-01 00:00:00+00:00")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=[{}]")
@@ -1,2 +0,0 @@
go test fuzz v1
[]byte("0=nan")
+1 -1
View File
@@ -8,7 +8,7 @@ import (
"testing" "testing"
"github.com/pelletier/go-toml/v2" "github.com/pelletier/go-toml/v2"
"github.com/pelletier/go-toml/v2/internal/testsuite" "github.com/pelletier/go-toml/v2/testsuite"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
+32 -215
View File
@@ -1,4 +1,4 @@
// Generated by tomltestgen for toml-test ref master on 2022-04-07T20:09:42-04:00 // Generated by tomltestgen for toml-test ref master on 2021-11-08T22:33:24-05:00
package toml_test package toml_test
import ( import (
@@ -55,51 +55,11 @@ func TestTOMLTest_Invalid_Array_TextInArray(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Bool_AlmostFalseWithExtra(t *testing.T) {
input := "a = falsify\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_AlmostFalse(t *testing.T) {
input := "a = fals\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_AlmostTrueWithExtra(t *testing.T) {
input := "a = truthy\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_AlmostTrue(t *testing.T) {
input := "a = tru\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_JustF(t *testing.T) {
input := "a = f\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_JustT(t *testing.T) {
input := "a = t\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_MixedCase(t *testing.T) { func TestTOMLTest_Invalid_Bool_MixedCase(t *testing.T) {
input := "valid = False\n" input := "valid = False\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Bool_StartingSameFalse(t *testing.T) {
input := "a = falsey\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_StartingSameTrue(t *testing.T) {
input := "a = truer\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Bool_WrongCaseFalse(t *testing.T) { func TestTOMLTest_Invalid_Bool_WrongCaseFalse(t *testing.T) {
input := "b = FALSE\n" input := "b = FALSE\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -110,31 +70,6 @@ func TestTOMLTest_Invalid_Bool_WrongCaseTrue(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Control_BareCr(t *testing.T) {
input := "# The following line contains a single carriage return control character\r\n\r"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Control_BareFormfeed(t *testing.T) {
input := "bare-formfeed = \f\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Control_BareNull(t *testing.T) {
input := "bare-null = \"some value\" \x00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Control_BareVerticalTab(t *testing.T) {
input := "bare-vertical-tab = \v\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Control_CommentCr(t *testing.T) {
input := "comment-cr = \"Carriage return in comment\" # \ra=1\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Control_CommentDel(t *testing.T) { func TestTOMLTest_Invalid_Control_CommentDel(t *testing.T) {
input := "comment-del = \"0x7f\" # \u007f\n" input := "comment-del = \"0x7f\" # \u007f\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -240,73 +175,33 @@ func TestTOMLTest_Invalid_Control_StringUs(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_HourOver(t *testing.T) { func TestTOMLTest_Invalid_Datetime_ImpossibleDate(t *testing.T) {
input := "# time-hour = 2DIGIT ; 00-23\nd = 2006-01-01T24:00:00-00:00\n" input := "d = 2006-01-50T00:00:00Z\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_MdayOver(t *testing.T) {
input := "# date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on\n# ; month/year\nd = 2006-01-32T00:00:00-00:00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_MdayUnder(t *testing.T) {
input := "# date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on\n# ; month/year\nd = 2006-01-00T00:00:00-00:00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_MinuteOver(t *testing.T) {
input := "# time-minute = 2DIGIT ; 00-59\nd = 2006-01-01T00:60:00-00:00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_MonthOver(t *testing.T) {
input := "# date-month = 2DIGIT ; 01-12\nd = 2006-13-01T00:00:00-00:00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_MonthUnder(t *testing.T) {
input := "# date-month = 2DIGIT ; 01-12\nd = 2007-00-01T00:00:00-00:00\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_NoLeadsWithMilli(t *testing.T) { func TestTOMLTest_Invalid_Datetime_NoLeadsWithMilli(t *testing.T) {
input := "# Day \"5\" instead of \"05\"; the leading zero is required.\nwith-milli = 1987-07-5T17:45:00.12Z\n" input := "with-milli = 1987-07-5T17:45:00.12Z\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_NoLeads(t *testing.T) { func TestTOMLTest_Invalid_Datetime_NoLeads(t *testing.T) {
input := "# Month \"7\" instead of \"07\"; the leading zero is required.\nno-leads = 1987-7-05T17:45:00Z\n" input := "no-leads = 1987-7-05T17:45:00Z\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_NoSecs(t *testing.T) { func TestTOMLTest_Invalid_Datetime_NoSecs(t *testing.T) {
input := "# No seconds in time.\nno-secs = 1987-07-05T17:45Z\n" input := "no-secs = 1987-07-05T17:45Z\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_NoT(t *testing.T) { func TestTOMLTest_Invalid_Datetime_NoT(t *testing.T) {
input := "# No \"t\" or \"T\" between the date and time.\nno-t = 1987-07-0517:45:00Z\n" input := "no-t = 1987-07-0517:45:00Z\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_SecondOver(t *testing.T) {
input := "# time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second\n# ; rules\nd = 2006-01-01T00:00:61-00:00\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_TimeNoLeads2(t *testing.T) {
input := "# Leading 0 is always required.\nd = 01:32:0\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Datetime_TimeNoLeads(t *testing.T) {
input := "# Leading 0 is always required.\nd = 1:32:00\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Datetime_TrailingT(t *testing.T) { func TestTOMLTest_Invalid_Datetime_TrailingT(t *testing.T) {
input := "# Date cannot end with trailing T\nd = 2006-01-30T\n" input := "d = 2006-01-30T\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
@@ -500,11 +395,6 @@ func TestTOMLTest_Invalid_Float_UsBeforePoint(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_InlineTable_Add(t *testing.T) {
input := "a={}\n# Inline tables are immutable and can't be extended\n[a.b]\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_InlineTable_DoubleComma(t *testing.T) { func TestTOMLTest_Invalid_InlineTable_DoubleComma(t *testing.T) {
input := "t = {x=3,,y=4}\n" input := "t = {x=3,,y=4}\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -545,11 +435,6 @@ func TestTOMLTest_Invalid_InlineTable_NoComma(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_InlineTable_Overwrite(t *testing.T) {
input := "a.b=0\n# Since table \"a\" is already defined, it can't be replaced by an inline table.\na={}\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_InlineTable_TrailingComma(t *testing.T) { func TestTOMLTest_Invalid_InlineTable_TrailingComma(t *testing.T) {
input := "# A terminating comma (also called trailing comma) is not permitted after the\n# last key/value pair in an inline table\nabc = { abc = 123, }\n" input := "# A terminating comma (also called trailing comma) is not permitted after the\n# last key/value pair in an inline table\nabc = { abc = 123, }\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -585,21 +470,6 @@ func TestTOMLTest_Invalid_Integer_DoubleUs(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Integer_IncompleteBin(t *testing.T) {
input := "incomplete-bin = 0b\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Integer_IncompleteHex(t *testing.T) {
input := "incomplete-hex = 0x\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Integer_IncompleteOct(t *testing.T) {
input := "incomplete-oct = 0o\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Integer_InvalidBin(t *testing.T) { func TestTOMLTest_Invalid_Integer_InvalidBin(t *testing.T) {
input := "invalid-bin = 0b0012\n" input := "invalid-bin = 0b0012\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -645,11 +515,6 @@ func TestTOMLTest_Invalid_Integer_LeadingZero2(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Integer_LeadingZero3(t *testing.T) {
input := "leading-zero-3 = 0_0\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Integer_LeadingZeroSign1(t *testing.T) { func TestTOMLTest_Invalid_Integer_LeadingZeroSign1(t *testing.T) {
input := "leading-zero-sign-1 = -01\n" input := "leading-zero-sign-1 = -01\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -660,11 +525,6 @@ func TestTOMLTest_Invalid_Integer_LeadingZeroSign2(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Integer_LeadingZeroSign3(t *testing.T) {
input := "leading-zero-sign-3 = +0_1\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Integer_NegativeBin(t *testing.T) { func TestTOMLTest_Invalid_Integer_NegativeBin(t *testing.T) {
input := "negative-bin = -0b11010110\n" input := "negative-bin = -0b11010110\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -870,16 +730,11 @@ func TestTOMLTest_Invalid_String_BadConcat(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_String_BadEscape1(t *testing.T) { func TestTOMLTest_Invalid_String_BadEscape(t *testing.T) {
input := "invalid-escape = \"This string has a bad \\a escape character.\"\n" input := "invalid-escape = \"This string has a bad \\a escape character.\"\n"
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_String_BadEscape2(t *testing.T) {
input := "invalid-escape = \"This string has a bad \\ escape character.\"\n\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_String_BadMultiline(t *testing.T) { func TestTOMLTest_Invalid_String_BadMultiline(t *testing.T) {
input := "multi = \"first line\nsecond line\"\n" input := "multi = \"first line\nsecond line\"\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -950,21 +805,6 @@ func TestTOMLTest_Invalid_String_MissingQuotes(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_String_MultilineBadEscape1(t *testing.T) {
input := "k = \"\"\"t\\a\"\"\"\n\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_String_MultilineBadEscape2(t *testing.T) {
input := "# \\<Space> is not a valid escape.\nk = \"\"\"t\\ t\"\"\"\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_String_MultilineBadEscape3(t *testing.T) {
input := "# \\<Space> is not a valid escape.\nk = \"\"\"t\\ \"\"\"\n\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_String_MultilineEscapeSpace(t *testing.T) { func TestTOMLTest_Invalid_String_MultilineEscapeSpace(t *testing.T) {
input := "a = \"\"\"\n foo \\ \\n\n bar\"\"\"\n" input := "a = \"\"\"\n foo \\ \\n\n bar\"\"\"\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -985,6 +825,11 @@ func TestTOMLTest_Invalid_String_MultilineQuotes1(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_String_MultilineQuotes2(t *testing.T) {
input := "a = \"\"\"6 quotes: \"\"\"\"\"\"\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_String_NoClose(t *testing.T) { func TestTOMLTest_Invalid_String_NoClose(t *testing.T) {
input := "no-ending-quote = \"One time, at band camp\n" input := "no-ending-quote = \"One time, at band camp\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -1000,16 +845,6 @@ func TestTOMLTest_Invalid_String_WrongClose(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Table_AppendWithDottedKeys1(t *testing.T) {
input := "# First a.b.c defines a table: a.b.c = {z=9}\n#\n# Then we define a.b.c.t = \"str\" to add a str to the above table, making it:\n#\n# a.b.c = {z=9, t=\"...\"}\n#\n# While this makes sense, logically, it was decided this is not valid TOML as\n# it's too confusing/convoluted.\n# \n# See: https://github.com/toml-lang/toml/issues/846\n# https://github.com/toml-lang/toml/pull/859\n\n[a.b.c]\n z = 9\n\n[a]\n b.c.t = \"Using dotted keys to add to [a.b.c] after explicitly defining it above is not allowed\"\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_AppendWithDottedKeys2(t *testing.T) {
input := "# This is the same issue as in injection-1.toml, except that nests one level\n# deeper. See that file for a more complete description.\n\n[a.b.c.d]\n z = 9\n\n[a]\n b.c.d.k.t = \"Using dotted keys to add to [a.b.c.d] after explicitly defining it above is not allowed\"\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_ArrayEmpty(t *testing.T) { func TestTOMLTest_Invalid_Table_ArrayEmpty(t *testing.T) {
input := "[[]]\nname = \"Born to Run\"\n" input := "[[]]\nname = \"Born to Run\"\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -1025,16 +860,6 @@ func TestTOMLTest_Invalid_Table_ArrayMissingBracket(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Table_DuplicateKeyDottedTable(t *testing.T) {
input := "[fruit]\napple.color = \"red\"\n\n[fruit.apple] # INVALID\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_DuplicateKeyDottedTable2(t *testing.T) {
input := "[fruit]\napple.taste.sweet = true\n\n[fruit.apple.taste] # INVALID\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_DuplicateKeyTable(t *testing.T) { func TestTOMLTest_Invalid_Table_DuplicateKeyTable(t *testing.T) {
input := "[fruit]\ntype = \"apple\"\n\n[fruit.type]\napple = \"yes\"\n" input := "[fruit]\ntype = \"apple\"\n\n[fruit.type]\napple = \"yes\"\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -1070,6 +895,16 @@ func TestTOMLTest_Invalid_Table_EqualsSign(t *testing.T) {
testgenInvalid(t, input) testgenInvalid(t, input)
} }
func TestTOMLTest_Invalid_Table_Injection1(t *testing.T) {
input := "[a.b.c]\n z = 9\n[a]\n b.c.t = \"Using dotted keys to add to [a.b.c] after explicitly defining it above is not allowed\"\n \n# see https://github.com/toml-lang/toml/issues/846\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_Injection2(t *testing.T) {
input := "[a.b.c.d]\n z = 9\n[a]\n b.c.d.k.t = \"Using dotted keys to add to [a.b.c.d] after explicitly defining it above is not allowed\"\n \n# see https://github.com/toml-lang/toml/issues/846\n"
testgenInvalid(t, input)
}
func TestTOMLTest_Invalid_Table_Llbrace(t *testing.T) { func TestTOMLTest_Invalid_Table_Llbrace(t *testing.T) {
input := "[ [table]]\n" input := "[ [table]]\n"
testgenInvalid(t, input) testgenInvalid(t, input)
@@ -1236,14 +1071,8 @@ func TestTOMLTest_Valid_Comment_AtEof2(t *testing.T) {
} }
func TestTOMLTest_Valid_Comment_Everywhere(t *testing.T) { func TestTOMLTest_Valid_Comment_Everywhere(t *testing.T) {
input := "# Top comment.\n # Top comment.\n# Top comment.\n\n# [no-extraneous-groups-please]\n\n[group] # Comment\nanswer = 42 # Comment\n# no-extraneous-keys-please = 999\n# Inbetween comment.\nmore = [ # Comment\n # What about multiple # comments?\n # Can you handle it?\n #\n # Evil.\n# Evil.\n 42, 42, # Comments within arrays are fun.\n # What about multiple # comments?\n # Can you handle it?\n #\n # Evil.\n# Evil.\n# ] Did I fool you?\n] # Hopefully not.\n\n# Make sure the space between the datetime and \"#\" isn't lexed.\ndt = 1979-05-27T07:32:12-07:00 # c\nd = 1979-05-27 # Comment\n" input := "# Top comment.\n # Top comment.\n# Top comment.\n\n# [no-extraneous-groups-please]\n\n[group] # Comment\nanswer = 42 # Comment\n# no-extraneous-keys-please = 999\n# Inbetween comment.\nmore = [ # Comment\n # What about multiple # comments?\n # Can you handle it?\n #\n # Evil.\n# Evil.\n 42, 42, # Comments within arrays are fun.\n # What about multiple # comments?\n # Can you handle it?\n #\n # Evil.\n# Evil.\n# ] Did I fool you?\n] # Hopefully not.\n\n# Make sure the space between the datetime and \"#\" isn't lexed.\nd = 1979-05-27T07:32:12-07:00 # c\n"
jsonRef := "{\n \"group\": {\n \"answer\": {\n \"type\": \"integer\",\n \"value\": \"42\"\n },\n \"dt\": {\n \"type\": \"datetime\",\n \"value\": \"1979-05-27T07:32:12-07:00\"\n },\n \"d\": {\n \"type\": \"date-local\",\n \"value\": \"1979-05-27\"\n },\n \"more\": [\n {\n \"type\": \"integer\",\n \"value\": \"42\"\n },\n {\n \"type\": \"integer\",\n \"value\": \"42\"\n }\n ]\n }\n}\n" jsonRef := "{\n \"group\": {\n \"answer\": {\n \"type\": \"integer\",\n \"value\": \"42\"\n },\n \"d\": {\n \"type\": \"datetime\",\n \"value\": \"1979-05-27T07:32:12-07:00\"\n },\n \"more\": [\n {\n \"type\": \"integer\",\n \"value\": \"42\"\n },\n {\n \"type\": \"integer\",\n \"value\": \"42\"\n }\n ]\n }\n}\n"
testgenValid(t, input, jsonRef)
}
func TestTOMLTest_Valid_Comment_Noeol(t *testing.T) {
input := "# single comment without any eol characters"
jsonRef := "{}\n"
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
@@ -1278,8 +1107,8 @@ func TestTOMLTest_Valid_Datetime_Local(t *testing.T) {
} }
func TestTOMLTest_Valid_Datetime_Milliseconds(t *testing.T) { func TestTOMLTest_Valid_Datetime_Milliseconds(t *testing.T) {
input := "utc1 = 1987-07-05T17:45:56.1234Z\nutc2 = 1987-07-05T17:45:56.6Z\nwita1 = 1987-07-05T17:45:56.1234+08:00\nwita2 = 1987-07-05T17:45:56.6+08:00\n" input := "utc1 = 1987-07-05T17:45:56.123456Z\nutc2 = 1987-07-05T17:45:56.6Z\nwita1 = 1987-07-05T17:45:56.123456+08:00\nwita2 = 1987-07-05T17:45:56.6+08:00\n"
jsonRef := "{\n \"utc1\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.1234Z\"\n },\n \"utc2\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.6000Z\"\n },\n \"wita1\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.1234+08:00\"\n },\n \"wita2\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.6000+08:00\"\n }\n}\n" jsonRef := "{\n \"utc1\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.123456Z\"\n },\n \"utc2\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.600000Z\"\n },\n \"wita1\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.123456+08:00\"\n },\n \"wita2\": {\n \"type\": \"datetime\",\n \"value\": \"1987-07-05T17:45:56.600000+08:00\"\n }\n}\n"
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
@@ -1541,12 +1370,6 @@ func TestTOMLTest_Valid_String_Empty(t *testing.T) {
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
func TestTOMLTest_Valid_String_EscapeEsc(t *testing.T) {
input := "esc = \"\\e There is no escape! \\e\"\n"
jsonRef := "{\n \"esc\": {\n \"type\": \"string\",\n \"value\": \"\\u001b There is no escape! \\u001b\"\n }\n}\n"
testgenValid(t, input, jsonRef)
}
func TestTOMLTest_Valid_String_EscapeTricky(t *testing.T) { func TestTOMLTest_Valid_String_EscapeTricky(t *testing.T) {
input := "end_esc = \"String does not end here\\\" but ends here\\\\\"\nlit_end_esc = 'String ends here\\'\n\nmultiline_unicode = \"\"\"\n\\u00a0\"\"\"\n\nmultiline_not_unicode = \"\"\"\n\\\\u0041\"\"\"\n\nmultiline_end_esc = \"\"\"When will it end? \\\"\"\"...\"\"\\\" should be here\\\"\"\"\"\n\nlit_multiline_not_unicode = '''\n\\u007f'''\n\nlit_multiline_end = '''There is no escape\\'''\n" input := "end_esc = \"String does not end here\\\" but ends here\\\\\"\nlit_end_esc = 'String ends here\\'\n\nmultiline_unicode = \"\"\"\n\\u00a0\"\"\"\n\nmultiline_not_unicode = \"\"\"\n\\\\u0041\"\"\"\n\nmultiline_end_esc = \"\"\"When will it end? \\\"\"\"...\"\"\\\" should be here\\\"\"\"\"\n\nlit_multiline_not_unicode = '''\n\\u007f'''\n\nlit_multiline_end = '''There is no escape\\'''\n"
jsonRef := "{\n \"end_esc\": {\n \"type\": \"string\",\n \"value\": \"String does not end here\\\" but ends here\\\\\"\n },\n \"lit_end_esc\": {\n \"type\": \"string\",\n \"value\": \"String ends here\\\\\"\n },\n \"lit_multiline_end\": {\n \"type\": \"string\",\n \"value\": \"There is no escape\\\\\"\n },\n \"lit_multiline_not_unicode\": {\n \"type\": \"string\",\n \"value\": \"\\\\u007f\"\n },\n \"multiline_end_esc\": {\n \"type\": \"string\",\n \"value\": \"When will it end? \\\"\\\"\\\"...\\\"\\\"\\\" should be here\\\"\"\n },\n \"multiline_not_unicode\": {\n \"type\": \"string\",\n \"value\": \"\\\\u0041\"\n },\n \"multiline_unicode\": {\n \"type\": \"string\",\n \"value\": \"\u00a0\"\n }\n}\n" jsonRef := "{\n \"end_esc\": {\n \"type\": \"string\",\n \"value\": \"String does not end here\\\" but ends here\\\\\"\n },\n \"lit_end_esc\": {\n \"type\": \"string\",\n \"value\": \"String ends here\\\\\"\n },\n \"lit_multiline_end\": {\n \"type\": \"string\",\n \"value\": \"There is no escape\\\\\"\n },\n \"lit_multiline_not_unicode\": {\n \"type\": \"string\",\n \"value\": \"\\\\u007f\"\n },\n \"multiline_end_esc\": {\n \"type\": \"string\",\n \"value\": \"When will it end? \\\"\\\"\\\"...\\\"\\\"\\\" should be here\\\"\"\n },\n \"multiline_not_unicode\": {\n \"type\": \"string\",\n \"value\": \"\\\\u0041\"\n },\n \"multiline_unicode\": {\n \"type\": \"string\",\n \"value\": \"\u00a0\"\n }\n}\n"
@@ -1565,20 +1388,14 @@ func TestTOMLTest_Valid_String_Escapes(t *testing.T) {
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
func TestTOMLTest_Valid_String_MultilineEscapedCrlf(t *testing.T) {
input := "# The following line should be an unescaped backslash followed by a Windows\r\n# newline sequence (\"\\r\\n\")\r\n0=\"\"\"\\\r\n\"\"\"\r\n"
jsonRef := "{\n \"0\": {\n \"type\": \"string\",\n \"value\": \"\"\n }\n}\n"
testgenValid(t, input, jsonRef)
}
func TestTOMLTest_Valid_String_MultilineQuotes(t *testing.T) { func TestTOMLTest_Valid_String_MultilineQuotes(t *testing.T) {
input := "# Make sure that quotes inside multiline strings are allowed, including right\n# after the opening '''/\"\"\" and before the closing '''/\"\"\"\n\nlit_one = ''''one quote''''\nlit_two = '''''two quotes'''''\nlit_one_space = ''' 'one quote' '''\nlit_two_space = ''' ''two quotes'' '''\n\none = \"\"\"\"one quote\"\"\"\"\ntwo = \"\"\"\"\"two quotes\"\"\"\"\"\none_space = \"\"\" \"one quote\" \"\"\"\ntwo_space = \"\"\" \"\"two quotes\"\" \"\"\"\n\nmismatch1 = \"\"\"aaa'''bbb\"\"\"\nmismatch2 = '''aaa\"\"\"bbb'''\n\n# Three opening \"\"\", then one escaped \", then two \"\" (allowed), and then three\n# closing \"\"\"\nescaped = \"\"\"lol\\\"\"\"\"\"\"\n" input := "# Make sure that quotes inside multiline strings are allowed, including right\n# after the opening '''/\"\"\" and before the closing '''/\"\"\"\n\nlit_one = ''''one quote''''\nlit_two = '''''two quotes'''''\nlit_one_space = ''' 'one quote' '''\nlit_two_space = ''' ''two quotes'' '''\n\none = \"\"\"\"one quote\"\"\"\"\ntwo = \"\"\"\"\"two quotes\"\"\"\"\"\none_space = \"\"\" \"one quote\" \"\"\"\ntwo_space = \"\"\" \"\"two quotes\"\" \"\"\"\n\nmismatch1 = \"\"\"aaa'''bbb\"\"\"\nmismatch2 = '''aaa\"\"\"bbb'''\n"
jsonRef := "{\n \"escaped\": {\n \"type\": \"string\",\n \"value\": \"lol\\\"\\\"\\\"\"\n },\n \"lit_one\": {\n \"type\": \"string\",\n \"value\": \"'one quote'\"\n },\n \"lit_one_space\": {\n \"type\": \"string\",\n \"value\": \" 'one quote' \"\n },\n \"lit_two\": {\n \"type\": \"string\",\n \"value\": \"''two quotes''\"\n },\n \"lit_two_space\": {\n \"type\": \"string\",\n \"value\": \" ''two quotes'' \"\n },\n \"mismatch1\": {\n \"type\": \"string\",\n \"value\": \"aaa'''bbb\"\n },\n \"mismatch2\": {\n \"type\": \"string\",\n \"value\": \"aaa\\\"\\\"\\\"bbb\"\n },\n \"one\": {\n \"type\": \"string\",\n \"value\": \"\\\"one quote\\\"\"\n },\n \"one_space\": {\n \"type\": \"string\",\n \"value\": \" \\\"one quote\\\" \"\n },\n \"two\": {\n \"type\": \"string\",\n \"value\": \"\\\"\\\"two quotes\\\"\\\"\"\n },\n \"two_space\": {\n \"type\": \"string\",\n \"value\": \" \\\"\\\"two quotes\\\"\\\" \"\n }\n}\n" jsonRef := "{\n \"lit_one\": {\n \"type\": \"string\",\n \"value\": \"'one quote'\"\n },\n \"lit_one_space\": {\n \"type\": \"string\",\n \"value\": \" 'one quote' \"\n },\n \"lit_two\": {\n \"type\": \"string\",\n \"value\": \"''two quotes''\"\n },\n \"lit_two_space\": {\n \"type\": \"string\",\n \"value\": \" ''two quotes'' \"\n },\n \"mismatch1\": {\n \"type\": \"string\",\n \"value\": \"aaa'''bbb\"\n },\n \"mismatch2\": {\n \"type\": \"string\",\n \"value\": \"aaa\\\"\\\"\\\"bbb\"\n },\n \"one\": {\n \"type\": \"string\",\n \"value\": \"\\\"one quote\\\"\"\n },\n \"one_space\": {\n \"type\": \"string\",\n \"value\": \" \\\"one quote\\\" \"\n },\n \"two\": {\n \"type\": \"string\",\n \"value\": \"\\\"\\\"two quotes\\\"\\\"\"\n },\n \"two_space\": {\n \"type\": \"string\",\n \"value\": \" \\\"\\\"two quotes\\\"\\\" \"\n }\n}\n"
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
func TestTOMLTest_Valid_String_Multiline(t *testing.T) { func TestTOMLTest_Valid_String_Multiline(t *testing.T) {
input := "# NOTE: this file includes some literal tab characters.\n\nmultiline_empty_one = \"\"\"\"\"\"\n\n# A newline immediately following the opening delimiter will be trimmed.\nmultiline_empty_two = \"\"\"\n\"\"\"\n\n# \\ at the end of line trims newlines as well; note that last \\ is followed by\n# two spaces, which are ignored.\nmultiline_empty_three = \"\"\"\\\n \"\"\"\nmultiline_empty_four = \"\"\"\\\n \\\n \\ \n \"\"\"\n\nequivalent_one = \"The quick brown fox jumps over the lazy dog.\"\nequivalent_two = \"\"\"\nThe quick brown \\\n\n\n fox jumps over \\\n the lazy dog.\"\"\"\n\nequivalent_three = \"\"\"\\\n The quick brown \\\n fox jumps over \\\n the lazy dog.\\\n \"\"\"\n\nwhitespace-after-bs = \"\"\"\\\n The quick brown \\\n fox jumps over \\ \n the lazy dog.\\\t\n \"\"\"\n\nno-space = \"\"\"a\\\n b\"\"\"\n\n# Has tab character.\nkeep-ws-before = \"\"\"a \t\\\n b\"\"\"\n\nescape-bs-1 = \"\"\"a \\\\\nb\"\"\"\n\nescape-bs-2 = \"\"\"a \\\\\\\nb\"\"\"\n\nescape-bs-3 = \"\"\"a \\\\\\\\\n b\"\"\"\n" input := "# NOTE: this file includes some literal tab characters.\n\nmultiline_empty_one = \"\"\"\"\"\"\nmultiline_empty_two = \"\"\"\n\"\"\"\nmultiline_empty_three = \"\"\"\\\n \"\"\"\nmultiline_empty_four = \"\"\"\\\n \\\n \\ \n \"\"\"\n\nequivalent_one = \"The quick brown fox jumps over the lazy dog.\"\nequivalent_two = \"\"\"\nThe quick brown \\\n\n\n fox jumps over \\\n the lazy dog.\"\"\"\n\nequivalent_three = \"\"\"\\\n The quick brown \\\n fox jumps over \\\n the lazy dog.\\\n \"\"\"\n\nwhitespace-after-bs = \"\"\"\\\n The quick brown \\\n fox jumps over \\ \n the lazy dog.\\\t\n \"\"\"\n\nno-space = \"\"\"a\\\n b\"\"\"\n\nkeep-ws-before = \"\"\"a \t\\\n b\"\"\"\n\nescape-bs-1 = \"\"\"a \\\\\nb\"\"\"\n\nescape-bs-2 = \"\"\"a \\\\\\\nb\"\"\"\n\nescape-bs-3 = \"\"\"a \\\\\\\\\n b\"\"\"\n"
jsonRef := "{\n \"equivalent_one\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"equivalent_three\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"equivalent_two\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"escape-bs-1\": {\n \"type\": \"string\",\n \"value\": \"a \\\\\\nb\"\n },\n \"escape-bs-2\": {\n \"type\": \"string\",\n \"value\": \"a \\\\b\"\n },\n \"escape-bs-3\": {\n \"type\": \"string\",\n \"value\": \"a \\\\\\\\\\n b\"\n },\n \"keep-ws-before\": {\n \"type\": \"string\",\n \"value\": \"a \\tb\"\n },\n \"multiline_empty_four\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"multiline_empty_one\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"multiline_empty_three\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"multiline_empty_two\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"no-space\": {\n \"type\": \"string\",\n \"value\": \"ab\"\n },\n \"whitespace-after-bs\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n }\n}\n" jsonRef := "{\n \"equivalent_one\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"equivalent_three\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"equivalent_two\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n },\n \"escape-bs-1\": {\n \"type\": \"string\",\n \"value\": \"a \\\\\\nb\"\n },\n \"escape-bs-2\": {\n \"type\": \"string\",\n \"value\": \"a \\\\b\"\n },\n \"escape-bs-3\": {\n \"type\": \"string\",\n \"value\": \"a \\\\\\\\\\n b\"\n },\n \"keep-ws-before\": {\n \"type\": \"string\",\n \"value\": \"a \\tb\"\n },\n \"multiline_empty_four\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"multiline_empty_one\": {\n \"type\": \"string\",\n \"value\": \"\"\n 
},\n \"multiline_empty_three\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"multiline_empty_two\": {\n \"type\": \"string\",\n \"value\": \"\"\n },\n \"no-space\": {\n \"type\": \"string\",\n \"value\": \"ab\"\n },\n \"whitespace-after-bs\": {\n \"type\": \"string\",\n \"value\": \"The quick brown fox jumps over the lazy dog.\"\n }\n}\n"
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
@@ -1590,7 +1407,7 @@ func TestTOMLTest_Valid_String_Nl(t *testing.T) {
} }
func TestTOMLTest_Valid_String_RawMultiline(t *testing.T) { func TestTOMLTest_Valid_String_RawMultiline(t *testing.T) {
input := "# Single ' should be allowed.\noneline = '''This string has a ' quote character.'''\n\n# A newline immediately following the opening delimiter will be trimmed.\nfirstnl = '''\nThis string has a ' quote character.'''\n\n# All other whitespace and newline characters remain intact.\nmultiline = '''\nThis string\nhas ' a quote character\nand more than\none newline\nin it.'''\n" input := "oneline = '''This string has a ' quote character.'''\nfirstnl = '''\nThis string has a ' quote character.'''\nmultiline = '''\nThis string\nhas ' a quote character\nand more than\none newline\nin it.'''\n"
jsonRef := "{\n \"firstnl\": {\n \"type\": \"string\",\n \"value\": \"This string has a ' quote character.\"\n },\n \"multiline\": {\n \"type\": \"string\",\n \"value\": \"This string\\nhas ' a quote character\\nand more than\\none newline\\nin it.\"\n },\n \"oneline\": {\n \"type\": \"string\",\n \"value\": \"This string has a ' quote character.\"\n }\n}\n" jsonRef := "{\n \"firstnl\": {\n \"type\": \"string\",\n \"value\": \"This string has a ' quote character.\"\n },\n \"multiline\": {\n \"type\": \"string\",\n \"value\": \"This string\\nhas ' a quote character\\nand more than\\none newline\\nin it.\"\n },\n \"oneline\": {\n \"type\": \"string\",\n \"value\": \"This string has a ' quote character.\"\n }\n}\n"
testgenValid(t, input, jsonRef) testgenValid(t, input, jsonRef)
} }
+201 -188
View File
@@ -11,6 +11,7 @@ import (
"strings" "strings"
"sync/atomic" "sync/atomic"
"time" "time"
"unsafe"
"github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/ast"
"github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/danger"
@@ -42,27 +43,25 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r} return &Decoder{r: r}
} }
// DisallowUnknownFields causes the Decoder to return an error when the // SetStrict toggles decoding in stict mode.
// destination is a struct and the input contains a key that does not match a
// non-ignored field.
// //
// In that case, the Decoder returns a StrictMissingError that can be used to // When the decoder is in strict mode, it will record fields from the document
// retrieve the individual errors as well as generate a human readable // that could not be set on the target value. In that case, the decoder returns
// description of the missing fields. // a StrictMissingError that can be used to retrieve the individual errors as
func (d *Decoder) DisallowUnknownFields() *Decoder { // well as generate a human readable description of the missing fields.
d.strict = true func (d *Decoder) SetStrict(strict bool) *Decoder {
d.strict = strict
return d return d
} }
// Decode the whole content of r into v. // Decode the whole content of r into v.
// //
// By default, values in the document that don't exist in the target Go value // By default, values in the document that don't exist in the target Go value
// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. // are ignored. See Decoder.SetStrict() to change this behavior.
// //
// When a TOML local date, time, or date-time is decoded into a time.Time, its // When a TOML local date, time, or date-time is decoded into a time.Time, its
// value is represented in time.Local timezone. Otherwise the approriate Local* // value is represented in time.Local timezone. Otherwise the approriate Local*
// structure is used. For time values, precision up to the nanosecond is // structure is used.
// supported by truncating extra digits.
// //
// Empty tables decoded in an interface{} create an empty initialized // Empty tables decoded in an interface{} create an empty initialized
// map[string]interface{}. // map[string]interface{}.
@@ -74,27 +73,22 @@ func (d *Decoder) DisallowUnknownFields() *Decoder {
// bounds for the target type (which includes negative numbers when decoding // bounds for the target type (which includes negative numbers when decoding
// into an unsigned int). // into an unsigned int).
// //
// If an error occurs while decoding the content of the document, this function // Type mapping
// returns a toml.DecodeError, providing context about the issue. When using
// strict mode and a field is missing, a `toml.StrictMissingError` is
// returned. In any other case, this function returns a standard Go error.
//
// # Type mapping
// //
// List of supported TOML types and their associated accepted Go types: // List of supported TOML types and their associated accepted Go types:
// //
// String -> string // String -> string
// Integer -> uint*, int*, depending on size // Integer -> uint*, int*, depending on size
// Float -> float*, depending on size // Float -> float*, depending on size
// Boolean -> bool // Boolean -> bool
// Offset Date-Time -> time.Time // Offset Date-Time -> time.Time
// Local Date-time -> LocalDateTime, time.Time // Local Date-time -> LocalDateTime, time.Time
// Local Date -> LocalDate, time.Time // Local Date -> LocalDate, time.Time
// Local Time -> LocalTime, time.Time // Local Time -> LocalTime, time.Time
// Array -> slice and array, depending on elements types // Array -> slice and array, depending on elements types
// Table -> map and struct // Table -> map and struct
// Inline Table -> same as Table // Inline Table -> same as Table
// Array of Tables -> same as Array and Table // Array of Tables -> same as Array and Table
func (d *Decoder) Decode(v interface{}) error { func (d *Decoder) Decode(v interface{}) error {
b, err := ioutil.ReadAll(d.r) b, err := ioutil.ReadAll(d.r)
if err != nil { if err != nil {
@@ -123,7 +117,7 @@ type decoder struct {
stashedExpr bool stashedExpr bool
// Skip expressions until a table is found. This is set to true when a // Skip expressions until a table is found. This is set to true when a
// table could not be created (missing field in map), so all KV expressions // table could not be create (missing field in map), so all KV expressions
// need to be skipped. // need to be skipped.
skipUntilTable bool skipUntilTable bool
@@ -138,23 +132,6 @@ type decoder struct {
// Strict mode // Strict mode
strict strict strict strict
// Current context for the error.
errorContext *errorContext
}
type errorContext struct {
Struct reflect.Type
Field []int
}
func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
if d.errorContext != nil && d.errorContext.Struct != nil {
ctx := d.errorContext
f := ctx.Struct.FieldByIndex(ctx.Field)
return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
}
return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target)
} }
func (d *decoder) expr() *ast.Node { func (d *decoder) expr() *ast.Node {
@@ -322,12 +299,10 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val
return v, nil return v, nil
case reflect.Slice: case reflect.Slice:
elemType := v.Type().Elem() elemType := v.Type().Elem()
var elem reflect.Value
if elemType.Kind() == reflect.Interface { if elemType.Kind() == reflect.Interface {
elem = makeMapStringInterface() elemType = mapStringInterfaceType
} else {
elem = reflect.New(elemType).Elem()
} }
elem := reflect.New(elemType).Elem()
elem2, err := d.handleArrayTable(key, elem) elem2, err := d.handleArrayTable(key, elem)
if err != nil { if err != nil {
return reflect.Value{}, err return reflect.Value{}, err
@@ -344,9 +319,9 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val
elem := v.Index(idx) elem := v.Index(idx)
_, err := d.handleArrayTable(key, elem) _, err := d.handleArrayTable(key, elem)
return v, err return v, err
default:
return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type())
} }
return d.handleArrayTable(key, v)
} }
// When parsing an array table expression, each part of the key needs to be // When parsing an array table expression, each part of the key needs to be
@@ -371,9 +346,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value)
if err != nil { if err != nil {
return reflect.Value{}, err return reflect.Value{}, err
} }
if elem.IsValid() { v.Elem().Set(elem)
v.Elem().Set(elem)
}
return v, nil return v, nil
case reflect.Slice: case reflect.Slice:
@@ -414,10 +387,9 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
elem = v.Elem() elem = v.Elem()
return d.handleKeyPart(key, elem, nextFn, makeFn) return d.handleKeyPart(key, elem, nextFn, makeFn)
case reflect.Map: case reflect.Map:
vt := v.Type()
// Create the key for the map element. Convert to key type. // Create the key for the map element. For now assume it's a string.
mk := reflect.ValueOf(string(key.Node().Data)).Convert(vt.Key()) mk := reflect.ValueOf(string(key.Node().Data))
// If the map does not exist, create it. // If the map does not exist, create it.
if v.IsNil() { if v.IsNil() {
@@ -434,6 +406,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
// map[string]interface{} or a []interface{} depending on whether // map[string]interface{} or a []interface{} depending on whether
// this is the last part of the array table key. // this is the last part of the array table key.
vt := v.Type()
t := vt.Elem() t := vt.Elem()
if t.Kind() == reflect.Interface { if t.Kind() == reflect.Interface {
mv = makeFn() mv = makeFn()
@@ -470,20 +443,12 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
v.SetMapIndex(mk, mv) v.SetMapIndex(mk, mv)
} }
case reflect.Struct: case reflect.Struct:
path, found := structFieldPath(v, string(key.Node().Data)) f, found := structField(v, string(key.Node().Data))
if !found { if !found {
d.skipUntilTable = true d.skipUntilTable = true
return reflect.Value{}, nil return reflect.Value{}, nil
} }
if d.errorContext == nil {
d.errorContext = new(errorContext)
}
t := v.Type()
d.errorContext.Struct = t
d.errorContext.Field = path
f := fieldByIndex(v, path)
x, err := nextFn(key, f) x, err := nextFn(key, f)
if err != nil || d.skipUntilTable { if err != nil || d.skipUntilTable {
return reflect.Value{}, err return reflect.Value{}, err
@@ -491,8 +456,6 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
if x.IsValid() { if x.IsValid() {
f.Set(x) f.Set(x)
} }
d.errorContext.Field = nil
d.errorContext.Struct = nil
case reflect.Interface: case reflect.Interface:
if v.Elem().IsValid() { if v.Elem().IsValid() {
v = v.Elem() v = v.Elem()
@@ -613,7 +576,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err
if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
if err != nil { if err != nil {
return false, newDecodeError(d.p.Raw(node.Raw), "%w", err) return false, newDecodeError(d.p.Raw(node.Raw), "error calling UnmarshalText: %w", err)
} }
return true, nil return true, nil
@@ -658,62 +621,128 @@ func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error {
} }
} }
func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { type unmarshalArrayFn func(d *decoder, array *ast.Node, v reflect.Value) error
switch v.Kind() {
case reflect.Slice: var globalUnmarshalArrayFnCache atomic.Value // map[danger.TypeID]unmarshalArrayFn
if v.IsNil() {
v.Set(reflect.MakeSlice(v.Type(), 0, 16)) func unmarshalArrayFnForSlice(vt reflect.Type) unmarshalArrayFn {
} else { tid := danger.MakeTypeID(vt)
v.SetLen(0)
} cache, _ := globalUnmarshalArrayFnCache.Load().(map[danger.TypeID]unmarshalArrayFn)
case reflect.Array: fn, ok := cache[tid]
// arrays are always initialized
case reflect.Interface: if ok {
elem := v.Elem() return fn
if !elem.IsValid() {
elem = reflect.New(sliceInterfaceType).Elem()
elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
} else if elem.Kind() == reflect.Slice {
if elem.Type() != sliceInterfaceType {
elem = reflect.New(sliceInterfaceType).Elem()
elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
} else if !elem.CanSet() {
nelem := reflect.New(sliceInterfaceType).Elem()
nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
reflect.Copy(nelem, elem)
elem = nelem
}
}
err := d.unmarshalArray(array, elem)
if err != nil {
return err
}
v.Set(elem)
return nil
default:
// TODO: use newDecodeError, but first the parser needs to fill
// array.Data.
return d.typeMismatchError("array", v.Type())
} }
elemType := v.Type().Elem() elemType := vt.Elem()
elemSize := elemType.Size()
it := array.Children() fn = func(d *decoder, array *ast.Node, v reflect.Value) error {
idx := 0 sp := (*danger.Slice)(unsafe.Pointer(v.UnsafeAddr()))
for it.Next() {
n := it.Node()
// TODO: optimize sp.Len = 0
if v.Kind() == reflect.Slice {
elem := reflect.New(elemType).Elem() it := array.Children()
for it.Next() {
n := it.Node()
idx := sp.Len
if sp.Len == sp.Cap {
c := sp.Cap
if c == 0 {
c = 16
} else {
c *= 2
}
*sp = danger.ExtendSlice(vt, sp, c)
}
datap := unsafe.Pointer(sp.Data)
elemp := danger.Stride(datap, elemSize, idx)
elem := reflect.NewAt(elemType, elemp).Elem()
err := d.handleValue(n, elem) err := d.handleValue(n, elem)
if err != nil { if err != nil {
return err return err
} }
v.Set(reflect.Append(v, elem)) sp.Len++
} else { // array }
if sp.Data == nil {
*sp = danger.ExtendSlice(vt, sp, 0)
}
return nil
}
newCache := make(map[danger.TypeID]unmarshalArrayFn, len(cache)+1)
newCache[tid] = fn
for k, v := range cache {
newCache[k] = v
}
globalUnmarshalArrayFnCache.Store(newCache)
return fn
}
func unmarshalArraySliceInterface(d *decoder, array *ast.Node, v reflect.Value) error {
sp := (*danger.Slice)(unsafe.Pointer(v.UnsafeAddr()))
sp.Len = 0
var x interface{}
it := array.Children()
for it.Next() {
n := it.Node()
idx := sp.Len
if sp.Len == sp.Cap {
c := sp.Cap
if c == 0 {
c = 16
} else {
c *= 2
}
*sp = danger.ExtendSlice(sliceInterfaceType, sp, c)
}
datap := unsafe.Pointer(sp.Data)
elemp := danger.Stride(datap, unsafe.Sizeof(x), idx)
elem := reflect.NewAt(sliceInterfaceType.Elem(), elemp).Elem()
err := d.handleValue(n, elem)
if err != nil {
return err
}
sp.Len++
}
if sp.Data == nil {
*sp = danger.ExtendSlice(sliceInterfaceType, sp, 0)
}
return nil
}
func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error {
switch v.Kind() {
case reflect.Slice:
fn := unmarshalArrayFnForSlice(v.Type())
return fn(d, array, v)
case reflect.Array:
// arrays are always initialized
it := array.Children()
idx := 0
for it.Next() {
n := it.Node()
if idx >= v.Len() { if idx >= v.Len() {
return nil return nil
} }
@@ -724,6 +753,39 @@ func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error {
} }
idx++ idx++
} }
case reflect.Interface:
elemIsSliceInterface := false
elem := v.Elem()
if !elem.IsValid() {
s := make([]interface{}, 0, 16)
elem = reflect.ValueOf(&s).Elem()
elemIsSliceInterface = true
} else if elem.Kind() == reflect.Slice {
if elem.Type() != sliceInterfaceType {
s := make([]interface{}, 0, 16)
elem = reflect.ValueOf(&s).Elem()
} else if !elem.CanSet() {
s := make([]interface{}, elem.Len(), elem.Cap())
nelem := reflect.ValueOf(&s).Elem()
reflect.Copy(nelem, elem)
elem = nelem
}
elemIsSliceInterface = true
}
var err error
if elemIsSliceInterface {
err = unmarshalArraySliceInterface(d, array, elem)
} else {
err = d.unmarshalArray(array, elem)
}
v.Set(elem)
return err
default:
// TODO: use newDecodeError, but first the parser needs to fill
// array.Data.
return fmt.Errorf("toml: cannot store array in Go type %s", v.Kind())
} }
return nil return nil
@@ -866,27 +928,12 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
return nil return nil
} }
const (
maxInt = int64(^uint(0) >> 1)
minInt = -maxInt - 1
)
// Maximum value of uint for decoding. Currently the decoder parses the integer
// into an int64. As a result, on architectures where uint is 64 bits, the
// effective maximum uint we can decode is the maximum of int64. On
// architectures where uint is 32 bits, the maximum value we can decode is
// lower: the maximum of uint32. I didn't find a way to figure out this value at
// compile time, so it is computed during initialization.
var maxUint int64 = math.MaxInt64
func init() {
m := uint64(^uint(0))
if m < uint64(maxUint) {
maxUint = int64(m)
}
}
func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
const (
maxInt = int64(^uint(0) >> 1)
minInt = -maxInt - 1
)
i, err := parseInteger(value.Data) i, err := parseInteger(value.Data)
if err != nil { if err != nil {
return err return err
@@ -947,7 +994,7 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
r = reflect.ValueOf(uint8(i)) r = reflect.ValueOf(uint8(i))
case reflect.Uint: case reflect.Uint:
if i < 0 || i > maxUint { if i < 0 {
return fmt.Errorf("toml: negative number %d does not fit in an uint", i) return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
} }
@@ -955,7 +1002,7 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
case reflect.Interface: case reflect.Interface:
r = reflect.ValueOf(i) r = reflect.ValueOf(i)
default: default:
return d.typeMismatchError("integer", v.Type()) return fmt.Errorf("toml: cannot store TOML integer into a Go %s", v.Kind())
} }
if !r.Type().AssignableTo(v.Type()) { if !r.Type().AssignableTo(v.Type()) {
@@ -1058,20 +1105,12 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec
v.SetMapIndex(mk, mv) v.SetMapIndex(mk, mv)
} }
case reflect.Struct: case reflect.Struct:
path, found := structFieldPath(v, string(key.Node().Data)) f, found := structField(v, string(key.Node().Data))
if !found { if !found {
d.skipUntilTable = true d.skipUntilTable = true
break break
} }
if d.errorContext == nil {
d.errorContext = new(errorContext)
}
t := v.Type()
d.errorContext.Struct = t
d.errorContext.Field = path
f := fieldByIndex(v, path)
x, err := d.handleKeyValueInner(key, value, f) x, err := d.handleKeyValueInner(key, value, f)
if err != nil { if err != nil {
return reflect.Value{}, err return reflect.Value{}, err
@@ -1080,8 +1119,6 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec
if x.IsValid() { if x.IsValid() {
f.Set(x) f.Set(x)
} }
d.errorContext.Struct = nil
d.errorContext.Field = nil
case reflect.Interface: case reflect.Interface:
v = v.Elem() v = v.Elem()
@@ -1135,30 +1172,16 @@ func initAndDereferencePointer(v reflect.Value) reflect.Value {
return elem return elem
} }
// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
func fieldByIndex(v reflect.Value, path []int) reflect.Value {
for i, x := range path {
v = v.Field(x)
if i < len(path)-1 && v.Kind() == reflect.Pointer {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
return v
}
type fieldPathsMap = map[string][]int type fieldPathsMap = map[string][]int
var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
func structFieldPath(v reflect.Value, name string) ([]int, bool) { func structField(v reflect.Value, name string) (reflect.Value, bool) {
t := v.Type() t := v.Type()
tid := danger.MakeTypeID(t)
cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
fieldPaths, ok := cache[danger.MakeTypeID(t)] fieldPaths, ok := cache[tid]
if !ok { if !ok {
fieldPaths = map[string][]int{} fieldPaths = map[string][]int{}
@@ -1170,7 +1193,7 @@ func structFieldPath(v reflect.Value, name string) ([]int, bool) {
}) })
newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
newCache[danger.MakeTypeID(t)] = fieldPaths newCache[tid] = fieldPaths
for k, v := range cache { for k, v := range cache {
newCache[k] = v newCache[k] = v
} }
@@ -1181,7 +1204,12 @@ func structFieldPath(v reflect.Value, name string) ([]int, bool) {
if !ok { if !ok {
path, ok = fieldPaths[strings.ToLower(name)] path, ok = fieldPaths[strings.ToLower(name)]
} }
return path, ok
if !ok {
return reflect.Value{}, false
}
return v.FieldByIndex(path), true
} }
func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { func forEachField(t reflect.Type, path []int, do func(name string, path []int)) {
@@ -1197,28 +1225,13 @@ func forEachField(t reflect.Type, path []int, do func(name string, path []int))
fieldPath := append(path, i) fieldPath := append(path, i)
fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]
name := f.Tag.Get("toml") if f.Anonymous {
if name == "-" { forEachField(f.Type, fieldPath, do)
continue continue
} }
if i := strings.IndexByte(name, ','); i >= 0 { name, ok := f.Tag.Lookup("toml")
name = name[:i] if !ok {
}
if f.Anonymous && name == "" {
t2 := f.Type
if t2.Kind() == reflect.Pointer {
t2 = t2.Elem()
}
if t2.Kind() == reflect.Struct {
forEachField(t2, fieldPath, do)
}
continue
}
if name == "" {
name = f.Name name = f.Name
} }
+39 -449
View File
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func ExampleDecoder_DisallowUnknownFields() { func ExampleDecoder_SetStrict() {
type S struct { type S struct {
Key1 string Key1 string
Key3 string Key3 string
@@ -28,7 +28,7 @@ key3 = "value3"
` `
r := strings.NewReader(doc) r := strings.NewReader(doc)
d := toml.NewDecoder(r) d := toml.NewDecoder(r)
d.DisallowUnknownFields() d.SetStrict(true)
s := S{} s := S{}
err := d.Decode(&s) err := d.Decode(&s)
@@ -279,11 +279,6 @@ func TestUnmarshal_Floats(t *testing.T) {
input: `1.0_e2`, input: `1.0_e2`,
err: true, err: true,
}, },
{
desc: "leading zero in positive float",
input: `+0_0.0`,
err: true,
},
} }
type doc struct { type doc struct {
@@ -545,35 +540,6 @@ func TestUnmarshal(t *testing.T) {
} }
}, },
}, },
{
desc: "issue 739 - table redefinition",
input: `
[foo.bar.baz]
wibble = 'wobble'
[foo]
[foo.bar]
huey = 'dewey'
`,
gen: func() test {
m := map[string]interface{}{}
return test{
target: &m,
expected: &map[string]interface{}{
`foo`: map[string]interface{}{
"bar": map[string]interface{}{
"huey": "dewey",
"baz": map[string]interface{}{
"wibble": "wobble",
},
},
},
},
}
},
},
{ {
desc: "multiline basic string", desc: "multiline basic string",
input: `A = """\ input: `A = """\
@@ -605,7 +571,7 @@ huey = 'dewey'
}, },
{ {
desc: "multiline basic string with windows newline", desc: "multiline basic string with windows newline",
input: "A = \"\"\"\r\nTe\r\nst\"\"\"", input: "A = \"\"\"\r\nTest\"\"\"",
gen: func() test { gen: func() test {
type doc struct { type doc struct {
A string A string
@@ -613,7 +579,7 @@ huey = 'dewey'
return test{ return test{
target: &doc{}, target: &doc{},
expected: &doc{A: "Te\r\nst"}, expected: &doc{A: "Test"},
} }
}, },
}, },
@@ -698,36 +664,6 @@ huey = 'dewey'
} }
}, },
}, },
{
desc: "long string array into []string",
input: `A = ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17"]`,
gen: func() test {
type doc struct {
A []string
}
return test{
target: &doc{},
expected: &doc{A: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17"}},
}
},
},
{
desc: "long string array into []interface{}",
input: `A = ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14",
"15","16","17"]`,
gen: func() test {
type doc struct {
A []interface{}
}
return test{
target: &doc{},
expected: &doc{A: []interface{}{"0", "1", "2", "3", "4", "5", "6",
"7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17"}},
}
},
},
{ {
desc: "standard table", desc: "standard table",
input: `[A] input: `[A]
@@ -971,7 +907,7 @@ B = "data"`,
"Name": "Hammer", "Name": "Hammer",
"Sku": int64(738594937), "Sku": int64(738594937),
}, },
map[string]interface{}{}, map[string]interface{}(nil),
map[string]interface{}{ map[string]interface{}{
"Name": "Nail", "Name": "Nail",
"Sku": int64(284758393), "Sku": int64(284758393),
@@ -1505,7 +1441,7 @@ B = "data"`,
target: &map[string]interface{}{}, target: &map[string]interface{}{},
expected: &map[string]interface{}{ expected: &map[string]interface{}{
"products": []interface{}{ "products": []interface{}{
map[string]interface{}{}, map[string]interface{}(nil),
}, },
}, },
} }
@@ -1735,28 +1671,6 @@ B = "data"`,
} }
}, },
}, },
{
desc: "kv that points to a slice",
input: "a.b.c = 'foo'",
gen: func() test {
doc := map[string][]string{}
return test{
target: &doc,
err: true,
}
},
},
{
desc: "kv that points to a pointer to a slice",
input: "a.b.c = 'foo'",
gen: func() test {
doc := map[string]*[]string{}
return test{
target: &doc,
err: true,
}
},
},
} }
for _, e := range examples { for _, e := range examples {
@@ -1846,20 +1760,6 @@ func TestUnmarshalOverflows(t *testing.T) {
} }
} }
func TestUnmarshalErrors(t *testing.T) {
type mystruct struct {
Bar string
}
data := `bar = 42`
s := mystruct{}
err := toml.Unmarshal([]byte(data), &s)
require.Error(t, err)
require.Equal(t, "toml: cannot decode TOML integer into struct field toml_test.mystruct.Bar of type string", err.Error())
}
func TestUnmarshalInvalidTarget(t *testing.T) { func TestUnmarshalInvalidTarget(t *testing.T) {
x := "foo" x := "foo"
err := toml.Unmarshal([]byte{}, x) err := toml.Unmarshal([]byte{}, x)
@@ -1898,7 +1798,8 @@ key2 = "missing2"
key3 = "missing3" key3 = "missing3"
key4 = "value4" key4 = "value4"
`, `,
expected: `2| key1 = "value1" expected: `
2| key1 = "value1"
3| key2 = "missing2" 3| key2 = "missing2"
| ~~~~ missing field | ~~~~ missing field
4| key3 = "missing3" 4| key3 = "missing3"
@@ -1908,7 +1809,8 @@ key4 = "value4"
3| key2 = "missing2" 3| key2 = "missing2"
4| key3 = "missing3" 4| key3 = "missing3"
| ~~~~ missing field | ~~~~ missing field
5| key4 = "value4"`, 5| key4 = "value4"
`,
target: &struct { target: &struct {
Key1 string Key1 string
Key4 string Key4 string
@@ -1917,8 +1819,10 @@ key4 = "value4"
{ {
desc: "multi-part key", desc: "multi-part key",
input: `a.short.key="foo"`, input: `a.short.key="foo"`,
expected: `1| a.short.key="foo" expected: `
| ~~~~~~~~~~~ missing field`, 1| a.short.key="foo"
| ~~~~~~~~~~~ missing field
`,
}, },
{ {
desc: "missing table", desc: "missing table",
@@ -1926,19 +1830,24 @@ key4 = "value4"
[foo] [foo]
bar = 42 bar = 42
`, `,
expected: `2| [foo] expected: `
2| [foo]
| ~~~ missing table | ~~~ missing table
3| bar = 42`, 3| bar = 42
`,
}, },
{ {
desc: "missing array table", desc: "missing array table",
input: ` input: `
[[foo]] [[foo]]
bar = 42`, bar = 42
expected: `2| [[foo]] `,
expected: `
2| [[foo]]
| ~~~ missing table | ~~~ missing table
3| bar = 42`, 3| bar = 42
`,
}, },
} }
@@ -1948,7 +1857,7 @@ bar = 42`,
t.Run("strict", func(t *testing.T) { t.Run("strict", func(t *testing.T) {
r := strings.NewReader(e.input) r := strings.NewReader(e.input)
d := toml.NewDecoder(r) d := toml.NewDecoder(r)
d.DisallowUnknownFields() d.SetStrict(true)
x := e.target x := e.target
if x == nil { if x == nil {
x = &struct{}{} x = &struct{}{}
@@ -1957,7 +1866,7 @@ bar = 42`,
var tsm *toml.StrictMissingError var tsm *toml.StrictMissingError
if errors.As(err, &tsm) { if errors.As(err, &tsm) {
assert.Equal(t, e.expected, tsm.String()) equalStringsIgnoreNewlines(t, e.expected, tsm.String())
} else { } else {
t.Fatalf("err should have been a *toml.StrictMissingError, but got %s (%T)", err, err) t.Fatalf("err should have been a *toml.StrictMissingError, but got %s (%T)", err, err)
} }
@@ -1966,6 +1875,7 @@ bar = 42`,
t.Run("default", func(t *testing.T) { t.Run("default", func(t *testing.T) {
r := strings.NewReader(e.input) r := strings.NewReader(e.input)
d := toml.NewDecoder(r) d := toml.NewDecoder(r)
d.SetStrict(false)
x := e.target x := e.target
if x == nil { if x == nil {
x = &struct{}{} x = &struct{}{}
@@ -2175,7 +2085,7 @@ xz_hash = "1a48f723fea1f17d786ce6eadd9d00914d38062d28fd9c455ed3c3801905b388"
expected := doc{ expected := doc{
Pkg: map[string]pkg{ Pkg: map[string]pkg{
"cargo": { "cargo": pkg{
Target: map[string]target{ Target: map[string]target{
"aarch64-apple-darwin": { "aarch64-apple-darwin": {
XZ_URL: "https://static.rust-lang.org/dist/2021-07-29/cargo-1.54.0-aarch64-apple-darwin.tar.xz", XZ_URL: "https://static.rust-lang.org/dist/2021-07-29/cargo-1.54.0-aarch64-apple-darwin.tar.xz",
@@ -2280,192 +2190,7 @@ func TestIssue666(t *testing.T) {
require.Error(t, err) require.Error(t, err)
} }
func TestIssue677(t *testing.T) { //nolint:funlen
doc := `
[Build]
Name = "publication build"
[[Build.Dependencies]]
Name = "command"
Program = "hugo"
`
type _tomlJob struct {
Dependencies []map[string]interface{}
}
type tomlParser struct {
Build *_tomlJob
}
p := tomlParser{}
err := toml.Unmarshal([]byte(doc), &p)
require.NoError(t, err)
expected := tomlParser{
Build: &_tomlJob{
Dependencies: []map[string]interface{}{
{
"Name": "command",
"Program": "hugo",
},
},
},
}
require.Equal(t, expected, p)
}
func TestIssue701(t *testing.T) {
// Expected behavior:
// Return an error since a cannot be modified. From the TOML spec:
//
// > Inline tables are fully self-contained and define all
// keys and sub-tables within them. Keys and sub-tables cannot
// be added outside the braces.
docs := []string{
`
a={}
[a.b]
z=0
`,
`
a={}
[[a.b]]
z=0
`,
}
for _, doc := range docs {
var v interface{}
err := toml.Unmarshal([]byte(doc), &v)
assert.Error(t, err)
}
}
func TestIssue703(t *testing.T) {
var v interface{}
err := toml.Unmarshal([]byte("[a]\nx.y=0\n[a.x]"), &v)
require.Error(t, err)
}
func TestIssue708(t *testing.T) {
v := map[string]string{}
err := toml.Unmarshal([]byte("0=\"\"\"\\\r\n\"\"\""), &v)
require.NoError(t, err)
require.Equal(t, map[string]string{"0": ""}, v)
}
// TestIssue710 exercises LocalTime fractional seconds with more digits
// than nanosecond resolution: the extra digits are dropped and Precision
// is capped at 9.
func TestIssue710(t *testing.T) {
	cases := []struct {
		input string
		want  toml.LocalTime
	}{
		{`0=00:00:00.0000000000`, toml.LocalTime{Precision: 9}},
		{`0=00:00:00.0000000001`, toml.LocalTime{Precision: 9}},
		{`0=00:00:00.1111111119`, toml.LocalTime{Nanosecond: 111111111, Precision: 9}},
	}
	for _, c := range cases {
		got := map[string]toml.LocalTime{}
		require.NoError(t, toml.Unmarshal([]byte(c.input), &got))
		require.Equal(t, map[string]toml.LocalTime{"0": c.want}, got)
	}
}
func TestIssue715(t *testing.T) {
var v interface{}
err := toml.Unmarshal([]byte("0=+"), &v)
require.Error(t, err)
err = toml.Unmarshal([]byte("0=-"), &v)
require.Error(t, err)
err = toml.Unmarshal([]byte("0=+A"), &v)
require.Error(t, err)
}
func TestIssue714(t *testing.T) {
var v interface{}
err := toml.Unmarshal([]byte("0."), &v)
require.Error(t, err)
err = toml.Unmarshal([]byte("0={0=0,"), &v)
require.Error(t, err)
}
func TestIssue772(t *testing.T) {
type FileHandling struct {
FilePattern string `toml:"pattern"`
}
type Config struct {
FileHandling `toml:"filehandling"`
}
var defaultConfigFile = []byte(`
[filehandling]
pattern = "reach-masterdev-"`)
config := Config{}
err := toml.Unmarshal(defaultConfigFile, &config)
require.NoError(t, err)
require.Equal(t, "reach-masterdev-", config.FileHandling.FilePattern)
}
// TestIssue774 checks that the `comment` struct tag is emitted above the
// marshaled array of tables.
func TestIssue774(t *testing.T) {
	type ScpData struct {
		Host string `json:"host"`
	}
	type GenConfig struct {
		SCP []ScpData `toml:"scp" comment:"Array of Secure Copy Configurations"`
	}
	cfg := &GenConfig{
		SCP: []ScpData{{Host: "main.domain.com"}},
	}
	out, err := toml.Marshal(cfg)
	require.NoError(t, err)
	want := `# Array of Secure Copy Configurations
[[scp]]
Host = 'main.domain.com'
`
	require.Equal(t, want, string(out))
}
func TestIssue799(t *testing.T) {
const testTOML = `
# notice the double brackets
[[test]]
answer = 42
`
var s struct {
// should be []map[string]int
Test map[string]int `toml:"test"`
}
err := toml.Unmarshal([]byte(testTOML), &s)
require.Error(t, err)
}
func TestIssue807(t *testing.T) {
type A struct {
Name string `toml:"name"`
}
type M struct {
*A
}
var m M
err := toml.Unmarshal([]byte(`name = 'foo'`), &m)
require.NoError(t, err)
require.Equal(t, "foo", m.Name)
}
func TestUnmarshalDecodeErrors(t *testing.T) { func TestUnmarshalDecodeErrors(t *testing.T) {
examples := []struct { examples := []struct {
desc string desc string
@@ -2480,10 +2205,18 @@ func TestUnmarshalDecodeErrors(t *testing.T) {
desc: "local time with fractional", desc: "local time with fractional",
data: `a = 11:22:33.x`, data: `a = 11:22:33.x`,
}, },
{
desc: "local time frac precision too large",
data: `a = 2021-05-09T11:22:33.99999999999`,
},
{ {
desc: "wrong time offset separator", desc: "wrong time offset separator",
data: `a = 1979-05-27T00:32:00.-07:00`, data: `a = 1979-05-27T00:32:00.-07:00`,
}, },
{
desc: "missing fractional with tz",
data: `a = 2021-05-09T11:22:33.99999999999`,
},
{ {
desc: "wrong time offset separator", desc: "wrong time offset separator",
data: `a = 1979-05-27T00:32:00Z07:00`, data: `a = 1979-05-27T00:32:00Z07:00`,
@@ -2493,25 +2226,9 @@ func TestUnmarshalDecodeErrors(t *testing.T) {
data: `flt8 = 224_617.445_991__228`, data: `flt8 = 224_617.445_991__228`,
}, },
{ {
desc: "float with double .", desc: "float with double _",
data: `flt8 = 1..2`, data: `flt8 = 1..2`,
}, },
{
desc: "number with plus sign and leading underscore",
data: `a = +_0`,
},
{
desc: "number with negative sign and leading underscore",
data: `a = -_0`,
},
{
desc: "exponent with plus sign and leading underscore",
data: `a = 0e+_0`,
},
{
desc: "exponent with negative sign and leading underscore",
data: `a = 0e-_0`,
},
{ {
desc: "int with wrong base", desc: "int with wrong base",
data: `a = 0f2`, data: `a = 0f2`,
@@ -2619,7 +2336,7 @@ world'`,
{ {
desc: "invalid seconds value", desc: "invalid seconds value",
data: `a=1979-05-27T12:45:99`, data: `a=1979-05-27T12:45:99`,
msg: `seconds cannot be greater 60`, msg: `seconds cannot be greater 59`,
}, },
{ {
desc: `binary with invalid digit`, desc: `binary with invalid digit`,
@@ -2823,70 +2540,14 @@ world'`,
desc: `invalid month`, desc: `invalid month`,
data: `a=2021-0--29`, data: `a=2021-0--29`,
}, },
{
desc: `zero is an invalid day`,
data: `a=2021-11-00`,
},
{
desc: `zero is an invalid month`,
data: `a=2021-00-11`,
},
{
desc: `invalid number of seconds digits with trailing digit`,
data: `a=0000-01-01 00:00:000000Z3`,
},
{
desc: `invalid zone offset hours`,
data: `a=0000-01-01 00:00:00+24:00`,
},
{
desc: `invalid zone offset minutes`,
data: `a=0000-01-01 00:00:00+00:60`,
},
{
desc: `invalid character in zone offset hours`,
data: `a=0000-01-01 00:00:00+0Z:00`,
},
{
desc: `invalid character in zone offset minutes`,
data: `a=0000-01-01 00:00:00+00:0Z`,
},
{
desc: `invalid number of seconds`,
data: `a=0000-01-01 00:00:00+27000`,
},
{ {
desc: `carriage return inside basic key`, desc: `carriage return inside basic key`,
data: "\"\r\"=42", data: "\"\r\"=42",
}, },
{
desc: `carriage return inside literal key`,
data: "'\r'=42",
},
{ {
desc: `carriage return inside basic string`, desc: `carriage return inside basic string`,
data: "A = \"\r\"", data: "A = \"\r\"",
}, },
{
desc: `carriage return inside basic multiline string`,
data: "a=\"\"\"\r\"\"\"",
},
{
desc: `carriage return at the trail of basic multiline string`,
data: "a=\"\"\"\r",
},
{
desc: `carriage return inside literal string`,
data: "A = '\r'",
},
{
desc: `carriage return inside multiline literal string`,
data: "a='''\r'''",
},
{
desc: `carriage return at trail of multiline literal string`,
data: "a='''\r",
},
{ {
desc: `carriage return in comment`, desc: `carriage return in comment`,
data: "# this is a test\ra=1", data: "# this is a test\ra=1",
@@ -2917,64 +2578,6 @@ world'`,
} }
} }
// TestOmitEmpty verifies that omitempty drops zero-value string fields
// (Bar) while a field whose struct value is non-empty (Inner) is still
// emitted, and that `-`-tagged and unexported fields never appear.
func TestOmitEmpty(t *testing.T) {
	type inner struct {
		private string
		Skip    string `toml:"-"`
		V       string
	}
	type elem struct {
		Foo   string `toml:",omitempty"`
		Bar   string `toml:",omitempty"`
		Inner inner  `toml:",omitempty"`
	}
	type doc struct {
		X []elem `toml:",inline"`
	}

	// gofmt -s: the element type name is redundant inside []elem{...}.
	d := doc{X: []elem{{
		Foo: "test",
		Inner: inner{
			V: "alue",
		},
	}}}

	b, err := toml.Marshal(d)
	require.NoError(t, err)
	require.Equal(t, "X = [{Foo = 'test', Inner = {V = 'alue'}}]\n", string(b))
}
func TestUnmarshalTags(t *testing.T) {
type doc struct {
Dash string `toml:"-,"`
Ignore string `toml:"-"`
A string `toml:"hello"`
B string `toml:"comma,omitempty"`
}
data := `
'-' = "dash"
Ignore = 'me'
hello = 'content'
comma = 'ok'
`
d := doc{}
expected := doc{
Dash: "dash",
Ignore: "",
A: "content",
B: "ok",
}
err := toml.Unmarshal([]byte(data), &d)
require.NoError(t, err)
require.Equal(t, expected, d)
}
func TestASCIIControlCharacters(t *testing.T) { func TestASCIIControlCharacters(t *testing.T) {
invalidCharacters := []byte{0x7F} invalidCharacters := []byte{0x7F}
for c := byte(0x0); c <= 0x08; c++ { for c := byte(0x0); c <= 0x08; c++ {
@@ -3288,16 +2891,3 @@ func TestUnmarshal_RecursiveTableArray(t *testing.T) {
}) })
} }
} }
func TestUnmarshalEmbedNonString(t *testing.T) {
type Foo []byte
type doc struct {
Foo
}
d := doc{}
err := toml.Unmarshal([]byte(`foo = 'bar'`), &d)
require.NoError(t, err)
require.Nil(t, d.Foo)
}