Compare commits
20 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 530b363c2f | |||
| f09f77ab06 | |||
| 9702fae9b8 | |||
| 3cf1eb2312 | |||
| 2af3554f90 | |||
| 180c6ba2ba | |||
| dafc4173ef | |||
| f1a83be671 | |||
| 5aeb70b3f0 | |||
| 8384a5683c | |||
| 4369957cb4 | |||
| a0e8464967 | |||
| c57d0d559f | |||
| 644602b845 | |||
| 36df8eef6e | |||
| 18a2148713 | |||
| bc9958322f | |||
| 6d56ac8027 | |||
| 098464b61b | |||
| 85e2448ce5 |
@@ -19,7 +19,7 @@ jobs:
|
|||||||
dry-run: false
|
dry-run: false
|
||||||
language: go
|
language: go
|
||||||
- name: Upload Crash
|
- name: Upload Crash
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v6
|
||||||
if: failure() && steps.build.outcome == 'success'
|
if: failure() && steps.build.outcome == 'success'
|
||||||
with:
|
with:
|
||||||
name: artifacts
|
name: artifacts
|
||||||
|
|||||||
@@ -35,11 +35,11 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v3
|
uses: github/codeql-action/init@v4
|
||||||
with:
|
with:
|
||||||
languages: ${{ matrix.language }}
|
languages: ${{ matrix.language }}
|
||||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
@@ -47,10 +47,10 @@ jobs:
|
|||||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||||
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||||
|
|
||||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
# If this step fails, then you should remove it and run the build manually (see below)
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
- name: Autobuild
|
- name: Autobuild
|
||||||
uses: github/codeql-action/autobuild@v3
|
uses: github/codeql-action/autobuild@v4
|
||||||
|
|
||||||
# ℹ️ Command-line programs to run using the OS shell.
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
# 📚 https://git.io/JvXDl
|
# 📚 https://git.io/JvXDl
|
||||||
@@ -64,4 +64,4 @@ jobs:
|
|||||||
# make release
|
# make release
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v3
|
uses: github/codeql-action/analyze@v4
|
||||||
|
|||||||
@@ -9,11 +9,11 @@ jobs:
|
|||||||
runs-on: "ubuntu-latest"
|
runs-on: "ubuntu-latest"
|
||||||
name: report
|
name: report
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Setup go
|
- name: Setup go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: "1.24"
|
go-version: "1.24"
|
||||||
- name: Run tests with coverage
|
- name: Run tests with coverage
|
||||||
|
|||||||
@@ -0,0 +1,36 @@
|
|||||||
|
name: Go Versions Compatibility Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
go_versions:
|
||||||
|
description: 'Go versions to test (space-separated, e.g., "1.21 1.22 1.23")'
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
|
type: string
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Run Go versions compatibility test
|
||||||
|
run: |
|
||||||
|
VERSIONS="${{ github.event.inputs.go_versions }}"
|
||||||
|
./test-go-versions.sh --output ./test-results $VERSIONS
|
||||||
|
|
||||||
|
- name: Upload test results
|
||||||
|
uses: actions/upload-artifact@v6
|
||||||
|
with:
|
||||||
|
name: go-versions-test-results
|
||||||
|
path: |
|
||||||
|
test-results/
|
||||||
|
retention-days: 30
|
||||||
@@ -16,11 +16,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: "1.24"
|
go-version: "1.24"
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
|
|||||||
@@ -16,11 +16,11 @@ jobs:
|
|||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
name: ${{ matrix.go }}/${{ matrix.os }}
|
name: ${{ matrix.go }}/${{ matrix.os }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Setup go ${{ matrix.go }}
|
- name: Setup go ${{ matrix.go }}
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
- name: Run unit tests
|
- name: Run unit tests
|
||||||
|
|||||||
@@ -5,3 +5,4 @@ cmd/tomljson/tomljson
|
|||||||
cmd/tomltestgen/tomltestgen
|
cmd/tomltestgen/tomltestgen
|
||||||
dist
|
dist
|
||||||
tests/
|
tests/
|
||||||
|
test-results
|
||||||
|
|||||||
@@ -0,0 +1,57 @@
|
|||||||
|
# Agent Guidelines for go-toml
|
||||||
|
|
||||||
|
This file provides guidelines for AI agents contributing to go-toml. All agents must follow these rules derived from [CONTRIBUTING.md](./CONTRIBUTING.md).
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
|
||||||
|
go-toml is a TOML library for Go. The goal is to provide an easy-to-use and efficient TOML implementation that gets the job done without getting in the way.
|
||||||
|
|
||||||
|
## Code Change Rules
|
||||||
|
|
||||||
|
### Backward Compatibility
|
||||||
|
|
||||||
|
- **No backward-incompatible changes** unless explicitly discussed and approved
|
||||||
|
- Avoid breaking people's programs unless absolutely necessary
|
||||||
|
|
||||||
|
### Testing Requirements
|
||||||
|
|
||||||
|
- **All bug fixes must include regression tests**
|
||||||
|
- **All new code must be tested**
|
||||||
|
- Run tests before submitting: `go test -race ./...`
|
||||||
|
- Test coverage must not decrease. Check with:
|
||||||
|
```bash
|
||||||
|
go test -covermode=atomic -coverprofile=coverage.out
|
||||||
|
go tool cover -func=coverage.out
|
||||||
|
```
|
||||||
|
- All lines of code touched by changes should be covered by tests
|
||||||
|
|
||||||
|
### Performance Requirements
|
||||||
|
|
||||||
|
- go-toml aims to stay efficient; avoid performance regressions
|
||||||
|
- Run benchmarks to verify: `go test ./... -bench=. -count=10`
|
||||||
|
- Compare results using [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
|
||||||
|
- New features or feature extensions must include documentation
|
||||||
|
- Documentation lives in [README.md](./README.md) and throughout source code
|
||||||
|
|
||||||
|
### Code Style
|
||||||
|
|
||||||
|
- Follow existing code format and structure
|
||||||
|
- Code must pass `go fmt`
|
||||||
|
|
||||||
|
### Commit Messages
|
||||||
|
|
||||||
|
- Commit messages must explain **why** the change is needed
|
||||||
|
- Keep messages clear and informative even if details are in the PR description
|
||||||
|
|
||||||
|
## Pull Request Checklist
|
||||||
|
|
||||||
|
Before submitting:
|
||||||
|
|
||||||
|
1. Tests pass (`go test -race ./...`)
|
||||||
|
2. No backward-incompatible changes (unless discussed)
|
||||||
|
3. Relevant documentation added/updated
|
||||||
|
4. No performance regression (verify with benchmarks)
|
||||||
|
5. Title is clear and understandable for changelog
|
||||||
+51
-9
@@ -33,7 +33,7 @@ The documentation is present in the [README][readme] and thorough the source
|
|||||||
code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
|
code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
|
||||||
to the documentation, create a pull request with your proposed changes. For
|
to the documentation, create a pull request with your proposed changes. For
|
||||||
simple changes like that, the easiest way to go is probably the "Fork this
|
simple changes like that, the easiest way to go is probably the "Fork this
|
||||||
project and edit the file" button on Github, displayed at the top right of the
|
project and edit the file" button on GitHub, displayed at the top right of the
|
||||||
file. Unless it's a trivial change (for example a typo), provide a little bit of
|
file. Unless it's a trivial change (for example a typo), provide a little bit of
|
||||||
context in your pull request description or commit message.
|
context in your pull request description or commit message.
|
||||||
|
|
||||||
@@ -92,6 +92,48 @@ However, given GitHub's new policy to _not_ run Actions on pull requests until a
|
|||||||
maintainer clicks on button, it is highly recommended that you run them locally
|
maintainer clicks on button, it is highly recommended that you run them locally
|
||||||
as you make changes.
|
as you make changes.
|
||||||
|
|
||||||
|
### Test across Go versions
|
||||||
|
|
||||||
|
The repository includes tooling to test go-toml across multiple Go versions
|
||||||
|
(1.11 through 1.25) both locally and in GitHub Actions.
|
||||||
|
|
||||||
|
#### Local testing with Docker
|
||||||
|
|
||||||
|
Prerequisites: Docker installed and running, Bash shell, `rsync` command.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test all Go versions in parallel (default)
|
||||||
|
./test-go-versions.sh
|
||||||
|
|
||||||
|
# Test specific versions
|
||||||
|
./test-go-versions.sh 1.21 1.22 1.23
|
||||||
|
|
||||||
|
# Test sequentially (slower but uses less resources)
|
||||||
|
./test-go-versions.sh --sequential
|
||||||
|
|
||||||
|
# Verbose output with custom results directory
|
||||||
|
./test-go-versions.sh --verbose --output ./my-results 1.24 1.25
|
||||||
|
|
||||||
|
# Show all options
|
||||||
|
./test-go-versions.sh --help
|
||||||
|
```
|
||||||
|
|
||||||
|
The script creates Docker containers for each Go version and runs the full test
|
||||||
|
suite. Results are saved to a `test-results/` directory with individual logs and
|
||||||
|
a comprehensive summary report.
|
||||||
|
|
||||||
|
The script only exits with a non-zero status code if either of the two most
|
||||||
|
recent Go versions fail.
|
||||||
|
|
||||||
|
#### GitHub Actions testing (maintainers)
|
||||||
|
|
||||||
|
1. Go to the **Actions** tab in the GitHub repository
|
||||||
|
2. Select **"Go Versions Compatibility Test"** from the workflow list
|
||||||
|
3. Click **"Run workflow"**
|
||||||
|
4. Optionally customize:
|
||||||
|
- **Go versions**: Space-separated list (e.g., `1.21 1.22 1.23`)
|
||||||
|
- **Execution mode**: Parallel (faster) or sequential (more stable)
|
||||||
|
|
||||||
### Check coverage
|
### Check coverage
|
||||||
|
|
||||||
We use `go tool cover` to compute test coverage. Most code editors have a way to
|
We use `go tool cover` to compute test coverage. Most code editors have a way to
|
||||||
@@ -111,7 +153,7 @@ code lowers the coverage.
|
|||||||
|
|
||||||
Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
|
Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
|
||||||
builtin benchmark systems. Because of their noisy nature, containers provided by
|
builtin benchmark systems. Because of their noisy nature, containers provided by
|
||||||
Github Actions cannot be reliably used for benchmarking. As a result, you are
|
GitHub Actions cannot be reliably used for benchmarking. As a result, you are
|
||||||
responsible for checking that your changes do not incur a performance penalty.
|
responsible for checking that your changes do not incur a performance penalty.
|
||||||
You can run their following to execute benchmarks:
|
You can run their following to execute benchmarks:
|
||||||
|
|
||||||
@@ -168,13 +210,13 @@ Checklist:
|
|||||||
1. Decide on the next version number. Use semver. Review commits since last
|
1. Decide on the next version number. Use semver. Review commits since last
|
||||||
version to assess.
|
version to assess.
|
||||||
2. Tag release. For example:
|
2. Tag release. For example:
|
||||||
```
|
```
|
||||||
git checkout v2
|
git checkout v2
|
||||||
git pull
|
git pull
|
||||||
git tag v2.2.0
|
git tag v2.2.0
|
||||||
git push --tags
|
git push --tags
|
||||||
```
|
```
|
||||||
3. CI automatically builds a draft Github release. Review it and edit as
|
3. CI automatically builds a draft GitHub release. Review it and edit as
|
||||||
necessary. Look for "Other changes". That would indicate a pull request not
|
necessary. Look for "Other changes". That would indicate a pull request not
|
||||||
labeled properly. Tweak labels and pull request titles until changelog looks
|
labeled properly. Tweak labels and pull request titles until changelog looks
|
||||||
good for users.
|
good for users.
|
||||||
|
|||||||
@@ -107,7 +107,11 @@ type MyConfig struct {
|
|||||||
### Unmarshaling
|
### Unmarshaling
|
||||||
|
|
||||||
[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
|
[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
|
||||||
content. For example:
|
content.
|
||||||
|
|
||||||
|
Note that the struct variable names are _capitalized_, while the variables in the toml document are _lowercase_.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
doc := `
|
doc := `
|
||||||
@@ -133,6 +137,62 @@ fmt.Println("tags:", cfg.Tags)
|
|||||||
|
|
||||||
[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
|
[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
|
||||||
|
|
||||||
|
|
||||||
|
Here is an example using tables with some simple nesting:
|
||||||
|
|
||||||
|
```go
|
||||||
|
doc := `
|
||||||
|
age = 45
|
||||||
|
fruits = ["apple", "pear"]
|
||||||
|
|
||||||
|
# these are very important!
|
||||||
|
[my-variables]
|
||||||
|
first = 1
|
||||||
|
second = 0.2
|
||||||
|
third = "abc"
|
||||||
|
|
||||||
|
# this is not so important.
|
||||||
|
[my-variables.b]
|
||||||
|
bfirst = 123
|
||||||
|
`
|
||||||
|
|
||||||
|
var Document struct {
|
||||||
|
Age int
|
||||||
|
Fruits []string
|
||||||
|
|
||||||
|
Myvariables struct {
|
||||||
|
First int
|
||||||
|
Second float64
|
||||||
|
Third string
|
||||||
|
|
||||||
|
B struct {
|
||||||
|
Bfirst int
|
||||||
|
}
|
||||||
|
} `toml:"my-variables"`
|
||||||
|
}
|
||||||
|
|
||||||
|
err := toml.Unmarshal([]byte(doc), &Document)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("age:", Document.Age)
|
||||||
|
fmt.Println("fruits:", Document.Fruits)
|
||||||
|
fmt.Println("my-variables.first:", Document.Myvariables.First)
|
||||||
|
fmt.Println("my-variables.second:", Document.Myvariables.Second)
|
||||||
|
fmt.Println("my-variables.third:", Document.Myvariables.Third)
|
||||||
|
fmt.Println("my-variables.B.Bfirst:", Document.Myvariables.B.Bfirst)
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// age: 45
|
||||||
|
// fruits: [apple pear]
|
||||||
|
// my-variables.first: 1
|
||||||
|
// my-variables.second: 0.2
|
||||||
|
// my-variables.third: abc
|
||||||
|
// my-variables.B.Bfirst: 123
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### Marshaling
|
### Marshaling
|
||||||
|
|
||||||
[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
|
[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package benchmark_test
|
|||||||
import (
|
import (
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -74,7 +74,7 @@ func fixture(tb testing.TB, path string) []byte {
|
|||||||
gz, err := gzip.NewReader(f)
|
gz, err := gzip.NewReader(f)
|
||||||
assert.NoError(tb, err)
|
assert.NoError(tb, err)
|
||||||
|
|
||||||
buf, err := ioutil.ReadAll(gz)
|
buf, err := io.ReadAll(gz)
|
||||||
assert.NoError(tb, err)
|
assert.NoError(tb, err)
|
||||||
return buf
|
return buf
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package benchmark_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io/ioutil"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -59,7 +59,7 @@ func BenchmarkUnmarshal(b *testing.B) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
b.Run("ReferenceFile", func(b *testing.B) {
|
b.Run("ReferenceFile", func(b *testing.B) {
|
||||||
bytes, err := ioutil.ReadFile("benchmark.toml")
|
bytes, err := os.ReadFile("benchmark.toml")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -165,7 +165,7 @@ func BenchmarkMarshal(b *testing.B) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
b.Run("ReferenceFile", func(b *testing.B) {
|
b.Run("ReferenceFile", func(b *testing.B) {
|
||||||
bytes, err := ioutil.ReadFile("benchmark.toml")
|
bytes, err := os.ReadFile("benchmark.toml")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -344,7 +344,7 @@ type benchmarkDoc struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalReferenceFile(t *testing.T) {
|
func TestUnmarshalReferenceFile(t *testing.T) {
|
||||||
bytes, err := ioutil.ReadFile("benchmark.toml")
|
bytes, err := os.ReadFile("benchmark.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
d := benchmarkDoc{}
|
d := benchmarkDoc{}
|
||||||
err = toml.Unmarshal(bytes, &d)
|
err = toml.Unmarshal(bytes, &d)
|
||||||
|
|||||||
@@ -106,6 +106,7 @@ func main() {
|
|||||||
for _, f := range dirContent {
|
for _, f := range dirContent {
|
||||||
filename := strings.TrimPrefix(f, "tests/valid/")
|
filename := strings.TrimPrefix(f, "tests/valid/")
|
||||||
name := kebabToCamel(strings.TrimSuffix(filename, ".toml"))
|
name := kebabToCamel(strings.TrimSuffix(filename, ".toml"))
|
||||||
|
name = strings.ReplaceAll(name, ".", "_")
|
||||||
|
|
||||||
log.Printf("> [%s] %s\n", "invalid", name)
|
log.Printf("> [%s] %s\n", "invalid", name)
|
||||||
|
|
||||||
@@ -126,6 +127,7 @@ func main() {
|
|||||||
for _, f := range dirContent {
|
for _, f := range dirContent {
|
||||||
filename := strings.TrimPrefix(f, "tests/valid/")
|
filename := strings.TrimPrefix(f, "tests/valid/")
|
||||||
name := kebabToCamel(strings.TrimSuffix(filename, ".toml"))
|
name := kebabToCamel(strings.TrimSuffix(filename, ".toml"))
|
||||||
|
name = strings.ReplaceAll(name, ".", "_")
|
||||||
|
|
||||||
log.Printf("> [%s] %s\n", "valid", name)
|
log.Printf("> [%s] %s\n", "valid", name)
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
"github.com/pelletier/go-toml/v2/unstable"
|
"github.com/pelletier/go-toml/v2/unstable"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -54,6 +53,17 @@ func (s *StrictMissingError) String() string {
|
|||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Unwrap returns wrapped decode errors
|
||||||
|
//
|
||||||
|
// Implements errors.Join() interface.
|
||||||
|
func (s *StrictMissingError) Unwrap() []error {
|
||||||
|
var errs []error
|
||||||
|
for i := range s.Errors {
|
||||||
|
errs = append(errs, &s.Errors[i])
|
||||||
|
}
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
type Key []string
|
type Key []string
|
||||||
|
|
||||||
// Error returns the error message contained in the DecodeError.
|
// Error returns the error message contained in the DecodeError.
|
||||||
@@ -78,7 +88,7 @@ func (e *DecodeError) Key() Key {
|
|||||||
return e.key
|
return e.key
|
||||||
}
|
}
|
||||||
|
|
||||||
// decodeErrorFromHighlight creates a DecodeError referencing a highlighted
|
// wrapDecodeError creates a DecodeError referencing a highlighted
|
||||||
// range of bytes from document.
|
// range of bytes from document.
|
||||||
//
|
//
|
||||||
// highlight needs to be a sub-slice of document, or this function panics.
|
// highlight needs to be a sub-slice of document, or this function panics.
|
||||||
@@ -88,7 +98,7 @@ func (e *DecodeError) Key() Key {
|
|||||||
//
|
//
|
||||||
//nolint:funlen
|
//nolint:funlen
|
||||||
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
|
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
|
||||||
offset := danger.SubsliceOffset(document, de.Highlight)
|
offset := cap(document) - cap(de.Highlight)
|
||||||
|
|
||||||
errMessage := de.Error()
|
errMessage := de.Error()
|
||||||
errLine, errColumn := positionAtEnd(document[:offset])
|
errLine, errColumn := positionAtEnd(document[:offset])
|
||||||
|
|||||||
@@ -205,6 +205,21 @@ func TestDecodeError_Accessors(t *testing.T) {
|
|||||||
assert.Equal(t, "bar", e.String())
|
assert.Equal(t, "bar", e.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStrictErrorUnwrap(t *testing.T) {
|
||||||
|
fo := bytes.NewBufferString(`
|
||||||
|
Missing = 1
|
||||||
|
OtherMissing = 1
|
||||||
|
`)
|
||||||
|
var out struct{}
|
||||||
|
err := NewDecoder(fo).DisallowUnknownFields().Decode(&out)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
strictErr := &StrictMissingError{}
|
||||||
|
assert.True(t, errors.As(err, &strictErr))
|
||||||
|
|
||||||
|
assert.Equal(t, 2, len(strictErr.Unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
func ExampleDecodeError() {
|
func ExampleDecodeError() {
|
||||||
doc := `name = 123__456`
|
doc := `name = 123__456`
|
||||||
|
|
||||||
|
|||||||
+2
-2
@@ -1,7 +1,7 @@
|
|||||||
package toml_test
|
package toml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func FuzzUnmarshal(f *testing.F) {
|
func FuzzUnmarshal(f *testing.F) {
|
||||||
file, err := ioutil.ReadFile("benchmark/benchmark.toml")
|
file, err := os.ReadFile("benchmark/benchmark.toml")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -79,7 +79,7 @@ func Zero[T any](t testing.TB, value T, msgAndArgs ...any) {
|
|||||||
}
|
}
|
||||||
t.Helper()
|
t.Helper()
|
||||||
msg := formatMsgAndArgs("Expected zero value but got:", msgAndArgs...)
|
msg := formatMsgAndArgs("Expected zero value but got:", msgAndArgs...)
|
||||||
t.Fatalf("%s\n%s", msg, fmt.Sprintf("%v", value))
|
t.Fatalf("%s\n%v", msg, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NotZero[T any](t testing.TB, value T, msgAndArgs ...any) {
|
func NotZero[T any](t testing.TB, value T, msgAndArgs ...any) {
|
||||||
@@ -92,7 +92,7 @@ func NotZero[T any](t testing.TB, value T, msgAndArgs ...any) {
|
|||||||
}
|
}
|
||||||
t.Helper()
|
t.Helper()
|
||||||
msg := formatMsgAndArgs("Unexpected zero value:", msgAndArgs...)
|
msg := formatMsgAndArgs("Unexpected zero value:", msgAndArgs...)
|
||||||
t.Fatalf("%s\n%s", msg, fmt.Sprintf("%v", value))
|
t.Fatalf("%s\n%v", msg, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
func formatMsgAndArgs(msg string, args ...any) string {
|
func formatMsgAndArgs(msg string, args ...any) string {
|
||||||
|
|||||||
+3
-4
@@ -6,7 +6,6 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2"
|
"github.com/pelletier/go-toml/v2"
|
||||||
@@ -23,7 +22,7 @@ type Program struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Program) Execute() {
|
func (p *Program) Execute() {
|
||||||
flag.Usage = func() { fmt.Fprintf(os.Stderr, p.Usage) }
|
flag.Usage = func() { fmt.Fprint(os.Stderr, p.Usage) }
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
os.Exit(p.main(flag.Args(), os.Stdin, os.Stdout, os.Stderr))
|
os.Exit(p.main(flag.Args(), os.Stdin, os.Stdout, os.Stderr))
|
||||||
}
|
}
|
||||||
@@ -72,7 +71,7 @@ func (p *Program) runAllFilesInPlace(files []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Program) runFileInPlace(path string) error {
|
func (p *Program) runFileInPlace(path string) error {
|
||||||
in, err := ioutil.ReadFile(path)
|
in, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -84,5 +83,5 @@ func (p *Program) runFileInPlace(path string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return ioutil.WriteFile(path, out.Bytes(), 0600)
|
return os.WriteFile(path, out.Bytes(), 0600)
|
||||||
}
|
}
|
||||||
|
|||||||
+10
-11
@@ -4,7 +4,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -63,7 +62,7 @@ func TestProcessMainStdinDecodeErr(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessMainFileExists(t *testing.T) {
|
func TestProcessMainFileExists(t *testing.T) {
|
||||||
tmpfile, err := ioutil.TempFile("", "example")
|
tmpfile, err := os.CreateTemp("", "example")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defer os.Remove(tmpfile.Name())
|
defer os.Remove(tmpfile.Name())
|
||||||
_, err = tmpfile.Write([]byte(`some data`))
|
_, err = tmpfile.Write([]byte(`some data`))
|
||||||
@@ -95,16 +94,16 @@ func TestProcessMainFileDoesNotExist(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessMainFilesInPlace(t *testing.T) {
|
func TestProcessMainFilesInPlace(t *testing.T) {
|
||||||
dir, err := ioutil.TempDir("", "")
|
dir, err := os.MkdirTemp("", "")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defer os.RemoveAll(dir)
|
defer os.RemoveAll(dir)
|
||||||
|
|
||||||
path1 := path.Join(dir, "file1")
|
path1 := path.Join(dir, "file1")
|
||||||
path2 := path.Join(dir, "file2")
|
path2 := path.Join(dir, "file2")
|
||||||
|
|
||||||
err = ioutil.WriteFile(path1, []byte("content 1"), 0600)
|
err = os.WriteFile(path1, []byte("content 1"), 0600)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
err = ioutil.WriteFile(path2, []byte("content 2"), 0600)
|
err = os.WriteFile(path2, []byte("content 2"), 0600)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
p := Program{
|
p := Program{
|
||||||
@@ -116,11 +115,11 @@ func TestProcessMainFilesInPlace(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(t, 0, exit)
|
assert.Equal(t, 0, exit)
|
||||||
|
|
||||||
v1, err := ioutil.ReadFile(path1)
|
v1, err := os.ReadFile(path1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, "1", string(v1))
|
assert.Equal(t, "1", string(v1))
|
||||||
|
|
||||||
v2, err := ioutil.ReadFile(path2)
|
v2, err := os.ReadFile(path2)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, "2", string(v2))
|
assert.Equal(t, "2", string(v2))
|
||||||
}
|
}
|
||||||
@@ -137,13 +136,13 @@ func TestProcessMainFilesInPlaceErrRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessMainFilesInPlaceFailFn(t *testing.T) {
|
func TestProcessMainFilesInPlaceFailFn(t *testing.T) {
|
||||||
dir, err := ioutil.TempDir("", "")
|
dir, err := os.MkdirTemp("", "")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defer os.RemoveAll(dir)
|
defer os.RemoveAll(dir)
|
||||||
|
|
||||||
path1 := path.Join(dir, "file1")
|
path1 := path.Join(dir, "file1")
|
||||||
|
|
||||||
err = ioutil.WriteFile(path1, []byte("content 1"), 0600)
|
err = os.WriteFile(path1, []byte("content 1"), 0600)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
p := Program{
|
p := Program{
|
||||||
@@ -155,13 +154,13 @@ func TestProcessMainFilesInPlaceFailFn(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(t, -1, exit)
|
assert.Equal(t, -1, exit)
|
||||||
|
|
||||||
v1, err := ioutil.ReadFile(path1)
|
v1, err := os.ReadFile(path1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, "content 1", string(v1))
|
assert.Equal(t, "content 1", string(v1))
|
||||||
}
|
}
|
||||||
|
|
||||||
func dummyFileFn(r io.Reader, w io.Writer) error {
|
func dummyFileFn(r io.Reader, w io.Writer) error {
|
||||||
b, err := ioutil.ReadAll(r)
|
b, err := io.ReadAll(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,65 +0,0 @@
|
|||||||
package danger
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
const maxInt = uintptr(int(^uint(0) >> 1))
|
|
||||||
|
|
||||||
func SubsliceOffset(data []byte, subslice []byte) int {
|
|
||||||
datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
|
|
||||||
hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))
|
|
||||||
|
|
||||||
if hlp.Data < datap.Data {
|
|
||||||
panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
|
|
||||||
}
|
|
||||||
offset := hlp.Data - datap.Data
|
|
||||||
|
|
||||||
if offset > maxInt {
|
|
||||||
panic(fmt.Errorf("slice offset larger than int (%d)", offset))
|
|
||||||
}
|
|
||||||
|
|
||||||
intoffset := int(offset)
|
|
||||||
|
|
||||||
if intoffset > datap.Len {
|
|
||||||
panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
|
|
||||||
}
|
|
||||||
|
|
||||||
if intoffset+hlp.Len > datap.Len {
|
|
||||||
panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
|
|
||||||
}
|
|
||||||
|
|
||||||
return intoffset
|
|
||||||
}
|
|
||||||
|
|
||||||
func BytesRange(start []byte, end []byte) []byte {
|
|
||||||
if start == nil || end == nil {
|
|
||||||
panic("cannot call BytesRange with nil")
|
|
||||||
}
|
|
||||||
startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
|
|
||||||
endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))
|
|
||||||
|
|
||||||
if startp.Data > endp.Data {
|
|
||||||
panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
|
|
||||||
}
|
|
||||||
|
|
||||||
l := startp.Len
|
|
||||||
endLen := int(endp.Data-startp.Data) + endp.Len
|
|
||||||
if endLen > l {
|
|
||||||
l = endLen
|
|
||||||
}
|
|
||||||
|
|
||||||
if l > startp.Cap {
|
|
||||||
panic(fmt.Errorf("range length is larger than capacity"))
|
|
||||||
}
|
|
||||||
|
|
||||||
return start[:l]
|
|
||||||
}
|
|
||||||
|
|
||||||
func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
|
|
||||||
// TODO: replace with unsafe.Add when Go 1.17 is released
|
|
||||||
// https://github.com/golang/go/issues/40481
|
|
||||||
return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
|
|
||||||
}
|
|
||||||
@@ -1,176 +0,0 @@
|
|||||||
package danger_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/assert"
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSubsliceOffsetValid(t *testing.T) {
|
|
||||||
examples := []struct {
|
|
||||||
desc string
|
|
||||||
test func() ([]byte, []byte)
|
|
||||||
offset int
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
desc: "simple",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
data := []byte("hello")
|
|
||||||
return data, data[1:]
|
|
||||||
},
|
|
||||||
offset: 1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, e := range examples {
|
|
||||||
t.Run(e.desc, func(t *testing.T) {
|
|
||||||
d, s := e.test()
|
|
||||||
offset := danger.SubsliceOffset(d, s)
|
|
||||||
assert.Equal(t, e.offset, offset)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSubsliceOffsetInvalid(t *testing.T) {
|
|
||||||
examples := []struct {
|
|
||||||
desc string
|
|
||||||
test func() ([]byte, []byte)
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
desc: "unrelated arrays",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
return []byte("one"), []byte("two")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "slice starts before data",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[5:], full[1:]
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "slice starts after data",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[:3], full[5:]
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "slice ends after data",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[:5], full[3:8]
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, e := range examples {
|
|
||||||
t.Run(e.desc, func(t *testing.T) {
|
|
||||||
d, s := e.test()
|
|
||||||
assert.Panics(t, func() {
|
|
||||||
danger.SubsliceOffset(d, s)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStride(t *testing.T) {
|
|
||||||
a := []byte{1, 2, 3, 4}
|
|
||||||
x := &a[1]
|
|
||||||
n := (*byte)(danger.Stride(unsafe.Pointer(x), unsafe.Sizeof(byte(0)), 1))
|
|
||||||
assert.Equal(t, &a[2], n)
|
|
||||||
n = (*byte)(danger.Stride(unsafe.Pointer(x), unsafe.Sizeof(byte(0)), -1))
|
|
||||||
assert.Equal(t, &a[0], n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBytesRange(t *testing.T) {
|
|
||||||
type fn = func() ([]byte, []byte)
|
|
||||||
examples := []struct {
|
|
||||||
desc string
|
|
||||||
test fn
|
|
||||||
expected []byte
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
desc: "simple",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[1:3], full[6:8]
|
|
||||||
},
|
|
||||||
expected: []byte("ello wo"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "full",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[0:1], full[len(full)-1:]
|
|
||||||
},
|
|
||||||
expected: []byte("hello world"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "end before start",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[len(full)-1:], full[0:1]
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "nils",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
return nil, nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "nils start",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
return nil, []byte("foo")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "nils end",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
return []byte("foo"), nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "start is end",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[1:3], full[1:3]
|
|
||||||
},
|
|
||||||
expected: []byte("el"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "end contained in start",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
full := []byte("hello world")
|
|
||||||
return full[1:7], full[2:4]
|
|
||||||
},
|
|
||||||
expected: []byte("ello w"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
desc: "different backing arrays",
|
|
||||||
test: func() ([]byte, []byte) {
|
|
||||||
one := []byte("hello world")
|
|
||||||
two := []byte("hello world")
|
|
||||||
return one, two
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, e := range examples {
|
|
||||||
t.Run(e.desc, func(t *testing.T) {
|
|
||||||
start, end := e.test()
|
|
||||||
if e.expected == nil {
|
|
||||||
assert.Panics(t, func() {
|
|
||||||
danger.BytesRange(start, end)
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
res := danger.BytesRange(start, end)
|
|
||||||
assert.Equal(t, e.expected, res)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
package danger
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// typeID is used as key in encoder and decoder caches to enable using
|
|
||||||
// the optimize runtime.mapaccess2_fast64 function instead of the more
|
|
||||||
// expensive lookup if we were to use reflect.Type as map key.
|
|
||||||
//
|
|
||||||
// typeID holds the pointer to the reflect.Type value, which is unique
|
|
||||||
// in the program.
|
|
||||||
//
|
|
||||||
// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
|
|
||||||
type TypeID unsafe.Pointer
|
|
||||||
|
|
||||||
func MakeTypeID(t reflect.Type) TypeID {
|
|
||||||
// reflect.Type has the fields:
|
|
||||||
// typ unsafe.Pointer
|
|
||||||
// ptr unsafe.Pointer
|
|
||||||
return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
|
|
||||||
}
|
|
||||||
@@ -2283,7 +2283,7 @@ func (c *Custom) UnmarshalTOML(v interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGithubIssue431(t *testing.T) {
|
func TestGitHubIssue431(t *testing.T) {
|
||||||
doc := `key = "value"`
|
doc := `key = "value"`
|
||||||
var c Config
|
var c Config
|
||||||
if err := toml.Unmarshal([]byte(doc), &c); err != nil {
|
if err := toml.Unmarshal([]byte(doc), &c); err != nil {
|
||||||
@@ -2321,7 +2321,7 @@ type config437 struct {
|
|||||||
} `toml:"HTTP"`
|
} `toml:"HTTP"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGithubIssue437(t *testing.T) {
|
func TestGitHubIssue437(t *testing.T) {
|
||||||
t.Skipf("unmarshalTOML not implemented")
|
t.Skipf("unmarshalTOML not implemented")
|
||||||
src := `
|
src := `
|
||||||
[HTTP]
|
[HTTP]
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
"github.com/pelletier/go-toml/v2"
|
"github.com/pelletier/go-toml/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Marshal is a helpfer function for calling toml.Marshal
|
// Marshal is a helper function for calling toml.Marshal
|
||||||
//
|
//
|
||||||
// Only needed to avoid package import loops.
|
// Only needed to avoid package import loops.
|
||||||
func Marshal(v interface{}) ([]byte, error) {
|
func Marshal(v interface{}) ([]byte, error) {
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package tracker
|
package tracker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/assert"
|
"github.com/pelletier/go-toml/v2/internal/assert"
|
||||||
)
|
)
|
||||||
@@ -13,8 +13,8 @@ func TestEntrySize(t *testing.T) {
|
|||||||
// and a very good reason.
|
// and a very good reason.
|
||||||
maxExpectedEntrySize := 48
|
maxExpectedEntrySize := 48
|
||||||
assert.True(t,
|
assert.True(t,
|
||||||
int(unsafe.Sizeof(entry{})) <= maxExpectedEntrySize,
|
int(reflect.TypeOf(entry{}).Size()) <= maxExpectedEntrySize,
|
||||||
"Expected entry to be less than or equal to %d, got: %d",
|
"Expected entry to be less than or equal to %d, got: %d",
|
||||||
maxExpectedEntrySize, int(unsafe.Sizeof(entry{})),
|
maxExpectedEntrySize, int(reflect.TypeOf(entry{}).Size()),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
+37
-3
@@ -161,6 +161,8 @@ func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder {
|
|||||||
//
|
//
|
||||||
// The "omitempty" option prevents empty values or groups from being emitted.
|
// The "omitempty" option prevents empty values or groups from being emitted.
|
||||||
//
|
//
|
||||||
|
// The "omitzero" option prevents zero values or groups from being emitted.
|
||||||
|
//
|
||||||
// The "commented" option prefixes the value and all its children with a comment
|
// The "commented" option prefixes the value and all its children with a comment
|
||||||
// symbol.
|
// symbol.
|
||||||
//
|
//
|
||||||
@@ -196,6 +198,7 @@ func (enc *Encoder) Encode(v interface{}) error {
|
|||||||
type valueOptions struct {
|
type valueOptions struct {
|
||||||
multiline bool
|
multiline bool
|
||||||
omitempty bool
|
omitempty bool
|
||||||
|
omitzero bool
|
||||||
commented bool
|
commented bool
|
||||||
comment string
|
comment string
|
||||||
}
|
}
|
||||||
@@ -384,6 +387,10 @@ func shouldOmitEmpty(options valueOptions, v reflect.Value) bool {
|
|||||||
return options.omitempty && isEmptyValue(v)
|
return options.omitempty && isEmptyValue(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func shouldOmitZero(options valueOptions, v reflect.Value) bool {
|
||||||
|
return options.omitzero && v.IsZero()
|
||||||
|
}
|
||||||
|
|
||||||
func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) {
|
func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@@ -517,12 +524,26 @@ func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byt
|
|||||||
del = 0x7f
|
del = 0x7f
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, r := range []byte(v) {
|
bv := []byte(v)
|
||||||
|
for i := 0; i < len(bv); i++ {
|
||||||
|
r := bv[i]
|
||||||
switch r {
|
switch r {
|
||||||
case '\\':
|
case '\\':
|
||||||
b = append(b, `\\`...)
|
b = append(b, `\\`...)
|
||||||
case '"':
|
case '"':
|
||||||
b = append(b, `\"`...)
|
if multiline {
|
||||||
|
// Quotation marks do not need to be quoted in multiline strings unless
|
||||||
|
// it contains 3 consecutive. If 3+ quotes appear, quote all of them
|
||||||
|
// because it's visually better
|
||||||
|
if i+2 >= len(bv) || bv[i+1] != '"' || bv[i+2] != '"' {
|
||||||
|
b = append(b, r)
|
||||||
|
} else {
|
||||||
|
b = append(b, `\"\"\"`...)
|
||||||
|
i += 2
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
b = append(b, `\"`...)
|
||||||
|
}
|
||||||
case '\b':
|
case '\b':
|
||||||
b = append(b, `\b`...)
|
b = append(b, `\b`...)
|
||||||
case '\f':
|
case '\f':
|
||||||
@@ -760,6 +781,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
|
|||||||
options := valueOptions{
|
options := valueOptions{
|
||||||
multiline: opts.multiline,
|
multiline: opts.multiline,
|
||||||
omitempty: opts.omitempty,
|
omitempty: opts.omitempty,
|
||||||
|
omitzero: opts.omitzero,
|
||||||
commented: opts.commented,
|
commented: opts.commented,
|
||||||
comment: fieldType.Tag.Get("comment"),
|
comment: fieldType.Tag.Get("comment"),
|
||||||
}
|
}
|
||||||
@@ -820,6 +842,7 @@ type tagOptions struct {
|
|||||||
multiline bool
|
multiline bool
|
||||||
inline bool
|
inline bool
|
||||||
omitempty bool
|
omitempty bool
|
||||||
|
omitzero bool
|
||||||
commented bool
|
commented bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -832,7 +855,7 @@ func parseTag(tag string) (string, tagOptions) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
raw := tag[idx+1:]
|
raw := tag[idx+1:]
|
||||||
tag = string(tag[:idx])
|
tag = tag[:idx]
|
||||||
for raw != "" {
|
for raw != "" {
|
||||||
var o string
|
var o string
|
||||||
i := strings.Index(raw, ",")
|
i := strings.Index(raw, ",")
|
||||||
@@ -848,6 +871,8 @@ func parseTag(tag string) (string, tagOptions) {
|
|||||||
opts.inline = true
|
opts.inline = true
|
||||||
case "omitempty":
|
case "omitempty":
|
||||||
opts.omitempty = true
|
opts.omitempty = true
|
||||||
|
case "omitzero":
|
||||||
|
opts.omitzero = true
|
||||||
case "commented":
|
case "commented":
|
||||||
opts.commented = true
|
opts.commented = true
|
||||||
}
|
}
|
||||||
@@ -882,6 +907,9 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
|
|||||||
if shouldOmitEmpty(kv.Options, kv.Value) {
|
if shouldOmitEmpty(kv.Options, kv.Value) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if shouldOmitZero(kv.Options, kv.Value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
hasNonEmptyKV = true
|
hasNonEmptyKV = true
|
||||||
|
|
||||||
ctx.setKey(kv.Key)
|
ctx.setKey(kv.Key)
|
||||||
@@ -901,6 +929,9 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
|
|||||||
if shouldOmitEmpty(table.Options, table.Value) {
|
if shouldOmitEmpty(table.Options, table.Value) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if shouldOmitZero(table.Options, table.Value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if first {
|
if first {
|
||||||
first = false
|
first = false
|
||||||
if hasNonEmptyKV {
|
if hasNonEmptyKV {
|
||||||
@@ -935,6 +966,9 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte
|
|||||||
if shouldOmitEmpty(kv.Options, kv.Value) {
|
if shouldOmitEmpty(kv.Options, kv.Value) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if shouldOmitZero(kv.Options, kv.Value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if first {
|
if first {
|
||||||
first = false
|
first = false
|
||||||
|
|||||||
+118
-3
@@ -6,6 +6,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"net/netip"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -387,6 +388,54 @@ name = 'Alice'
|
|||||||
expected: `A = """
|
expected: `A = """
|
||||||
hello
|
hello
|
||||||
world"""
|
world"""
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "multi-line quotation",
|
||||||
|
v: struct {
|
||||||
|
A string `toml:",multiline"`
|
||||||
|
}{
|
||||||
|
A: "hello\n\"world\"",
|
||||||
|
},
|
||||||
|
expected: `A = """
|
||||||
|
hello
|
||||||
|
"world""""
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "multi-line triple quotation",
|
||||||
|
v: struct {
|
||||||
|
A string `toml:",multiline"`
|
||||||
|
}{
|
||||||
|
A: "hello\n\"\"\"world\"",
|
||||||
|
},
|
||||||
|
expected: `A = """
|
||||||
|
hello
|
||||||
|
\"\"\"world""""
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "multi-line triple quotation",
|
||||||
|
v: struct {
|
||||||
|
A string `toml:",multiline"`
|
||||||
|
}{
|
||||||
|
A: "hello\n\"world\"\"\"",
|
||||||
|
},
|
||||||
|
expected: `A = """
|
||||||
|
hello
|
||||||
|
"world\"\"\""""
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "multi-line sextuple quotation",
|
||||||
|
v: struct {
|
||||||
|
A string `toml:",multiline"`
|
||||||
|
}{
|
||||||
|
A: "hello\n\"\"\"\"\"\"world\"",
|
||||||
|
},
|
||||||
|
expected: `A = """
|
||||||
|
hello
|
||||||
|
\"\"\"\"\"\"world""""
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1069,6 +1118,9 @@ func TestEncoderOmitempty(t *testing.T) {
|
|||||||
Ptr *string `toml:",omitempty,multiline"`
|
Ptr *string `toml:",omitempty,multiline"`
|
||||||
Iface interface{} `toml:",omitempty,multiline"`
|
Iface interface{} `toml:",omitempty,multiline"`
|
||||||
Struct struct{} `toml:",omitempty,multiline"`
|
Struct struct{} `toml:",omitempty,multiline"`
|
||||||
|
Inline struct {
|
||||||
|
String string `toml:",omitempty,multiline"`
|
||||||
|
} `toml:",inline"`
|
||||||
}
|
}
|
||||||
|
|
||||||
d := doc{}
|
d := doc{}
|
||||||
@@ -1076,7 +1128,68 @@ func TestEncoderOmitempty(t *testing.T) {
|
|||||||
b, err := toml.Marshal(d)
|
b, err := toml.Marshal(d)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
expected := ``
|
expected := `Inline = {}
|
||||||
|
`
|
||||||
|
|
||||||
|
assert.Equal(t, expected, string(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEncoderOmitzero(t *testing.T) {
|
||||||
|
type doc struct {
|
||||||
|
String string `toml:",omitzero,multiline"`
|
||||||
|
Bool bool `toml:",omitzero,multiline"`
|
||||||
|
Int int `toml:",omitzero,multiline"`
|
||||||
|
Int8 int8 `toml:",omitzero,multiline"`
|
||||||
|
Int16 int16 `toml:",omitzero,multiline"`
|
||||||
|
Int32 int32 `toml:",omitzero,multiline"`
|
||||||
|
Int64 int64 `toml:",omitzero,multiline"`
|
||||||
|
Uint uint `toml:",omitzero,multiline"`
|
||||||
|
Uint8 uint8 `toml:",omitzero,multiline"`
|
||||||
|
Uint16 uint16 `toml:",omitzero,multiline"`
|
||||||
|
Uint32 uint32 `toml:",omitzero,multiline"`
|
||||||
|
Uint64 uint64 `toml:",omitzero,multiline"`
|
||||||
|
Float32 float32 `toml:",omitzero,multiline"`
|
||||||
|
Float64 float64 `toml:",omitzero,multiline"`
|
||||||
|
MapNil map[string]string `toml:",omitzero,multiline"`
|
||||||
|
Slice []string `toml:",omitzero,multiline"`
|
||||||
|
Ptr *string `toml:",omitzero,multiline"`
|
||||||
|
Iface interface{} `toml:",omitzero,multiline"`
|
||||||
|
Struct struct{} `toml:",omitzero,multiline"`
|
||||||
|
Time time.Time `toml:",omitzero,multiline"`
|
||||||
|
IP netip.Addr `toml:",omitzero,multiline"`
|
||||||
|
Inline struct {
|
||||||
|
String string `toml:",omitzero,multiline"`
|
||||||
|
} `toml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
d := doc{}
|
||||||
|
|
||||||
|
b, err := toml.Marshal(d)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
expected := `Inline = {}
|
||||||
|
`
|
||||||
|
|
||||||
|
assert.Equal(t, expected, string(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEncoderOmitzeroOpaqueStruct(t *testing.T) {
|
||||||
|
type doc struct {
|
||||||
|
Time time.Time `toml:",omitzero"`
|
||||||
|
IP netip.Addr `toml:",omitzero"`
|
||||||
|
}
|
||||||
|
|
||||||
|
d := doc{
|
||||||
|
Time: time.Date(2001, 2, 3, 4, 5, 6, 7, time.UTC),
|
||||||
|
IP: netip.MustParseAddr("192.168.178.35"),
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := toml.Marshal(d)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
expected := `Time = 2001-02-03T04:05:06.000000007Z
|
||||||
|
IP = '192.168.178.35'
|
||||||
|
`
|
||||||
|
|
||||||
assert.Equal(t, expected, string(b))
|
assert.Equal(t, expected, string(b))
|
||||||
}
|
}
|
||||||
@@ -1278,7 +1391,8 @@ func TestIssue786(t *testing.T) {
|
|||||||
config.Custom = []Custom{{Name: "omit", General: General{Randomize: false}}}
|
config.Custom = []Custom{{Name: "omit", General: General{Randomize: false}}}
|
||||||
config.Custom = append(config.Custom, Custom{Name: "present", General: General{From: "-2d", Randomize: true}})
|
config.Custom = append(config.Custom, Custom{Name: "present", General: General{From: "-2d", Randomize: true}})
|
||||||
encoder := toml.NewEncoder(buf)
|
encoder := toml.NewEncoder(buf)
|
||||||
encoder.Encode(config)
|
err = encoder.Encode(config)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
expected := `# from in graphite-web format, the local TZ is used
|
expected := `# from in graphite-web format, the local TZ is used
|
||||||
from = '-2d'
|
from = '-2d'
|
||||||
@@ -1324,7 +1438,8 @@ func TestMarshalIssue888(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
encoder := toml.NewEncoder(buf).SetIndentTables(true)
|
encoder := toml.NewEncoder(buf).SetIndentTables(true)
|
||||||
encoder.Encode(config)
|
err := encoder.Encode(config)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
expected := `# custom config
|
expected := `# custom config
|
||||||
[[Custom]]
|
[[Custom]]
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package toml
|
package toml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/tracker"
|
"github.com/pelletier/go-toml/v2/internal/tracker"
|
||||||
"github.com/pelletier/go-toml/v2/unstable"
|
"github.com/pelletier/go-toml/v2/unstable"
|
||||||
)
|
)
|
||||||
@@ -103,5 +102,5 @@ func keyLocation(node *unstable.Node) []byte {
|
|||||||
end = k.Node().Data
|
end = k.Node().Data
|
||||||
}
|
}
|
||||||
|
|
||||||
return danger.BytesRange(start, end)
|
return start[:cap(start)-cap(end)+len(end)]
|
||||||
}
|
}
|
||||||
|
|||||||
Executable
+596
@@ -0,0 +1,596 @@
|
|||||||
|
#!/usr/bin/env bash

# NOTE(review): -e is not set — presumably so a failing per-version test does
# not abort the whole run (failures are collected and summarized); confirm.
set -uo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Go versions to test (1.11 through 1.25)
GO_VERSIONS=(
    "1.11"
    "1.12"
    "1.13"
    "1.14"
    "1.15"
    "1.16"
    "1.17"
    "1.18"
    "1.19"
    "1.20"
    "1.21"
    "1.22"
    "1.23"
    "1.24"
    "1.25"
)

# Default values (each overridable via the flags parsed further below)
PARALLEL=true             # -s/--sequential switches this off
VERBOSE=false             # -v/--verbose dumps each container's full output
OUTPUT_DIR="test-results" # -o/--output: where per-version logs/status go
DOCKER_TIMEOUT="10m"      # -t/--timeout: budget for each docker build/run
|
||||||
|
|
||||||
|
usage() {
    # Help text lives in a single heredoc; keep the OPTIONS list in sync
    # with the argument parser below.
    cat << EOF
Usage: $0 [OPTIONS] [GO_VERSIONS...]

Test go-toml across multiple Go versions using Docker containers.

The script reports the lowest continuous supported Go version (where all subsequent
versions pass) and only exits with non-zero status if either of the two most recent
Go versions fail, indicating immediate attention is needed.

Note: For Go versions < 1.21, the script automatically updates go.mod to match the
target version, but older versions may still fail due to missing standard library
features (e.g., the 'slices' package introduced in Go 1.21).

OPTIONS:
    -h, --help          Show this help message
    -s, --sequential    Run tests sequentially instead of in parallel
    -v, --verbose       Enable verbose output
    -o, --output DIR    Output directory for test results (default: test-results)
    -t, --timeout TIME  Docker timeout for each test (default: 10m)
    --list              List available Go versions and exit

ARGUMENTS:
    GO_VERSIONS         Specific Go versions to test (default: all supported versions)
                        Examples: 1.21 1.22 1.23

EXAMPLES:
    $0                                    # Test all Go versions in parallel
    $0 --sequential                       # Test all Go versions sequentially
    $0 1.21 1.22 1.23                     # Test specific versions
    $0 --verbose --output ./results 1.24 1.25  # Verbose output to custom directory

EXIT CODES:
    0    Recent Go versions pass (good compatibility)
    1    Recent Go versions fail (needs attention) or script error

EOF
}
|
||||||
|
|
||||||
|
# log prints a timestamped informational message to stderr.
log() {
    echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $*" >&2
}

# log_success prints a timestamped green check-marked message to stderr.
log_success() {
    echo -e "${GREEN}[$(date +'%H:%M:%S')] ✓${NC} $*" >&2
}

# log_error prints a timestamped red cross-marked message to stderr.
log_error() {
    echo -e "${RED}[$(date +'%H:%M:%S')] ✗${NC} $*" >&2
}

# log_warning prints a timestamped yellow warning message to stderr.
log_warning() {
    echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠${NC} $*" >&2
}
|
||||||
|
|
||||||
|
# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            usage
            exit 0
            ;;
        -s|--sequential)
            PARALLEL=false
            shift
            ;;
        -v|--verbose)
            VERBOSE=true
            shift
            ;;
        -o|--output)
            OUTPUT_DIR="$2"
            shift 2
            ;;
        -t|--timeout)
            DOCKER_TIMEOUT="$2"
            shift 2
            ;;
        --list)
            echo "Available Go versions:"
            printf '%s\n' "${GO_VERSIONS[@]}"
            exit 0
            ;;
        -*)
            echo "Unknown option: $1" >&2
            usage
            exit 1
            ;;
        *)
            # Remaining arguments are Go versions
            break
            ;;
    esac
done

# If specific versions provided, use those instead of defaults
if [[ $# -gt 0 ]]; then
    GO_VERSIONS=("$@")
fi

# Validate Go versions
# (the regex accepts exactly 1.11 through 1.25, mirroring GO_VERSIONS)
for version in "${GO_VERSIONS[@]}"; do
    if ! [[ "$version" =~ ^1\.(1[1-9]|2[0-5])$ ]]; then
        log_error "Invalid Go version: $version. Supported versions: 1.11-1.25"
        exit 1
    fi
done

# Check if Docker is available
if ! command -v docker &> /dev/null; then
    log_error "Docker is required but not installed or not in PATH"
    exit 1
fi

# Check if Docker daemon is running
if ! docker info &> /dev/null; then
    log_error "Docker daemon is not running"
    exit 1
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"
|
||||||
|
|
||||||
|
# Function to test a single Go version
|
||||||
|
# test_go_version builds a Docker image for one Go version, runs
# `go test ./...` inside it, and records the outcome:
#   $OUTPUT_DIR/go-<ver>.txt         full build+test output
#   $OUTPUT_DIR/go-<ver>.txt.status  PASSED or FAILED
# Returns non-zero when the build or the tests fail.
test_go_version() {
    local go_version="$1"
    local container_name="go-toml-test-${go_version}"
    local result_file="${OUTPUT_DIR}/go-${go_version}.txt"
    local dockerfile_content

    log "Testing Go $go_version..."

    # Create a temporary Dockerfile for this version
    # For Go versions < 1.21, we need to update go.mod to match the Go version
    # (sort -V picks the lower of the two; equal to go_version means "< 1.21").
    local needs_go_mod_update=false
    if [[ $(echo "$go_version 1.21" | tr ' ' '\n' | sort -V | head -n1) == "$go_version" && "$go_version" != "1.21" ]]; then
        needs_go_mod_update=true
    fi

    dockerfile_content="FROM golang:${go_version}-alpine

# Install git (required for go mod)
RUN apk add --no-cache git

# Set working directory
WORKDIR /app

# Copy source code
COPY . ."

    # Add go.mod update step for older Go versions
    if [[ "$needs_go_mod_update" == true ]]; then
        dockerfile_content="$dockerfile_content

# Update go.mod to match Go version (required for Go < 1.21)
RUN if [ -f go.mod ]; then sed -i 's/^go [0-9]\\+\\.[0-9]\\+\\(\\.[0-9]\\+\\)\\?/go $go_version/' go.mod; fi

# Note: Go versions < 1.21 may fail due to missing standard library packages (e.g., slices)
# This is expected for projects that use Go 1.21+ features"
    fi

    dockerfile_content="$dockerfile_content

# Run tests
CMD [\"sh\", \"-c\", \"go version && echo '--- Running go test ./... ---' && go test ./...\"]"

    # Create temporary directory for this test
    local temp_dir
    temp_dir=$(mktemp -d)

    # Copy source to temp directory (excluding test results and git)
    rsync -a --exclude="$OUTPUT_DIR" --exclude=".git" --exclude="*.test" . "$temp_dir/"

    # Create Dockerfile in temp directory
    echo "$dockerfile_content" > "$temp_dir/Dockerfile"

    # Build and run container
    local exit_code=0
    local output

    if $VERBOSE; then
        log "Building Docker image for Go $go_version..."
    fi

    # Capture both stdout and stderr, and the exit code
    if output=$(cd "$temp_dir" && timeout "$DOCKER_TIMEOUT" docker build -t "$container_name" . 2>&1 && \
        timeout "$DOCKER_TIMEOUT" docker run --rm "$container_name" 2>&1); then
        log_success "Go $go_version: PASSED"
        echo "PASSED" > "${result_file}.status"
    else
        exit_code=$?
        log_error "Go $go_version: FAILED (exit code: $exit_code)"
        echo "FAILED" > "${result_file}.status"
    fi

    # Save full output
    echo "$output" > "$result_file"

    # Clean up (best-effort: the image may not exist if the build failed)
    docker rmi "$container_name" &> /dev/null || true
    rm -rf "$temp_dir"

    if $VERBOSE; then
        echo "--- Go $go_version output ---"
        echo "$output"
        echo "--- End Go $go_version output ---"
    fi

    return $exit_code
}
|
||||||
|
|
||||||
|
# Function to run tests in parallel
|
||||||
|
# run_parallel launches test_go_version for every selected version as a
# background job, waits for all of them, and returns the number of failed
# versions. NOTE(review): shell return codes are taken mod 256, so callers
# should treat the status as zero/non-zero rather than an exact count.
run_parallel() {
    local pids=()
    local failed_versions=()

    log "Starting parallel tests for ${#GO_VERSIONS[@]} Go versions..."

    # Start all tests in background
    for version in "${GO_VERSIONS[@]}"; do
        test_go_version "$version" &
        pids+=($!)
    done

    # Wait for all tests to complete
    # (pids[i] corresponds to GO_VERSIONS[i] because jobs were started in order)
    for i in "${!pids[@]}"; do
        local pid=${pids[$i]}
        local version=${GO_VERSIONS[$i]}

        if ! wait $pid; then
            failed_versions+=("$version")
        fi
    done

    return ${#failed_versions[@]}
}
|
||||||
|
|
||||||
|
# Function to run tests sequentially
|
||||||
|
# run_sequential runs test_go_version for every selected version one after
# another and returns the number of failed versions (mod 256, as with any
# shell return status).
run_sequential() {
    local failed_versions=()

    log "Starting sequential tests for ${#GO_VERSIONS[@]} Go versions..."

    for version in "${GO_VERSIONS[@]}"; do
        if ! test_go_version "$version"; then
            failed_versions+=("$version")
        fi
    done

    return ${#failed_versions[@]}
}
|
||||||
|
|
||||||
|
# Main execution
|
||||||
|
main() {
|
||||||
|
local start_time
|
||||||
|
start_time=$(date +%s)
|
||||||
|
|
||||||
|
log "Starting Go version compatibility tests..."
|
||||||
|
log "Testing versions: ${GO_VERSIONS[*]}"
|
||||||
|
log "Output directory: $OUTPUT_DIR"
|
||||||
|
log "Parallel execution: $PARALLEL"
|
||||||
|
|
||||||
|
local failed_count
|
||||||
|
if $PARALLEL; then
|
||||||
|
run_parallel
|
||||||
|
failed_count=$?
|
||||||
|
else
|
||||||
|
run_sequential
|
||||||
|
failed_count=$?
|
||||||
|
fi
|
||||||
|
|
||||||
|
local end_time
|
||||||
|
end_time=$(date +%s)
|
||||||
|
local duration=$((end_time - start_time))
|
||||||
|
|
||||||
|
# Collect results for display
|
||||||
|
local passed_versions=()
|
||||||
|
local failed_versions=()
|
||||||
|
local unknown_versions=()
|
||||||
|
local passed_count=0
|
||||||
|
|
||||||
|
for version in "${GO_VERSIONS[@]}"; do
|
||||||
|
local status_file="${OUTPUT_DIR}/go-${version}.txt.status"
|
||||||
|
if [[ -f "$status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$status_file")
|
||||||
|
if [[ "$status" == "PASSED" ]]; then
|
||||||
|
passed_versions+=("$version")
|
||||||
|
((passed_count++))
|
||||||
|
else
|
||||||
|
failed_versions+=("$version")
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
unknown_versions+=("$version")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Generate summary report
|
||||||
|
local summary_file="${OUTPUT_DIR}/summary.txt"
|
||||||
|
{
|
||||||
|
echo "Go Version Compatibility Test Summary"
|
||||||
|
echo "====================================="
|
||||||
|
echo "Date: $(date)"
|
||||||
|
echo "Duration: ${duration}s"
|
||||||
|
echo "Parallel: $PARALLEL"
|
||||||
|
echo ""
|
||||||
|
echo "Results:"
|
||||||
|
|
||||||
|
for version in "${GO_VERSIONS[@]}"; do
|
||||||
|
local status_file="${OUTPUT_DIR}/go-${version}.txt.status"
|
||||||
|
if [[ -f "$status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$status_file")
|
||||||
|
if [[ "$status" == "PASSED" ]]; then
|
||||||
|
echo " Go $version: ✓ PASSED"
|
||||||
|
else
|
||||||
|
echo " Go $version: ✗ FAILED"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo " Go $version: ? UNKNOWN (no status file)"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Summary: $passed_count/${#GO_VERSIONS[@]} versions passed"
|
||||||
|
|
||||||
|
if [[ $failed_count -gt 0 ]]; then
|
||||||
|
echo ""
|
||||||
|
echo "Failed versions details:"
|
||||||
|
for version in "${failed_versions[@]}"; do
|
||||||
|
echo ""
|
||||||
|
echo "--- Go $version (FAILED) ---"
|
||||||
|
local result_file="${OUTPUT_DIR}/go-${version}.txt"
|
||||||
|
if [[ -f "$result_file" ]]; then
|
||||||
|
tail -n 30 "$result_file"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
} > "$summary_file"
|
||||||
|
|
||||||
|
# Find lowest continuous supported version and check recent versions
|
||||||
|
local lowest_continuous_version=""
|
||||||
|
local recent_versions_failed=false
|
||||||
|
|
||||||
|
# Sort versions to ensure proper order
|
||||||
|
local sorted_versions=()
|
||||||
|
for version in "${GO_VERSIONS[@]}"; do
|
||||||
|
sorted_versions+=("$version")
|
||||||
|
done
|
||||||
|
# Sort versions numerically (1.11, 1.12, ..., 1.25)
|
||||||
|
IFS=$'\n' sorted_versions=($(sort -V <<< "${sorted_versions[*]}"))
|
||||||
|
|
||||||
|
# Find lowest continuous supported version (all versions from this point onwards pass)
|
||||||
|
for version in "${sorted_versions[@]}"; do
|
||||||
|
local status_file="${OUTPUT_DIR}/go-${version}.txt.status"
|
||||||
|
local all_subsequent_pass=true
|
||||||
|
|
||||||
|
# Check if this version and all subsequent versions pass
|
||||||
|
local found_current=false
|
||||||
|
for check_version in "${sorted_versions[@]}"; do
|
||||||
|
if [[ "$check_version" == "$version" ]]; then
|
||||||
|
found_current=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "$found_current" == true ]]; then
|
||||||
|
local check_status_file="${OUTPUT_DIR}/go-${check_version}.txt.status"
|
||||||
|
if [[ -f "$check_status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$check_status_file")
|
||||||
|
if [[ "$status" != "PASSED" ]]; then
|
||||||
|
all_subsequent_pass=false
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
all_subsequent_pass=false
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ "$all_subsequent_pass" == true ]]; then
|
||||||
|
lowest_continuous_version="$version"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check if the two most recent versions failed
|
||||||
|
local num_versions=${#sorted_versions[@]}
|
||||||
|
if [[ $num_versions -ge 2 ]]; then
|
||||||
|
local second_recent="${sorted_versions[$((num_versions-2))]}"
|
||||||
|
local most_recent="${sorted_versions[$((num_versions-1))]}"
|
||||||
|
|
||||||
|
local second_recent_status_file="${OUTPUT_DIR}/go-${second_recent}.txt.status"
|
||||||
|
local most_recent_status_file="${OUTPUT_DIR}/go-${most_recent}.txt.status"
|
||||||
|
|
||||||
|
local second_recent_failed=false
|
||||||
|
local most_recent_failed=false
|
||||||
|
|
||||||
|
if [[ -f "$second_recent_status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$second_recent_status_file")
|
||||||
|
if [[ "$status" != "PASSED" ]]; then
|
||||||
|
second_recent_failed=true
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
second_recent_failed=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -f "$most_recent_status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$most_recent_status_file")
|
||||||
|
if [[ "$status" != "PASSED" ]]; then
|
||||||
|
most_recent_failed=true
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
most_recent_failed=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "$second_recent_failed" == true || "$most_recent_failed" == true ]]; then
|
||||||
|
recent_versions_failed=true
|
||||||
|
fi
|
||||||
|
elif [[ $num_versions -eq 1 ]]; then
|
||||||
|
# Only one version tested, check if it's the most recent and failed
|
||||||
|
local only_version="${sorted_versions[0]}"
|
||||||
|
local only_status_file="${OUTPUT_DIR}/go-${only_version}.txt.status"
|
||||||
|
|
||||||
|
if [[ -f "$only_status_file" ]]; then
|
||||||
|
local status
|
||||||
|
status=$(cat "$only_status_file")
|
||||||
|
if [[ "$status" != "PASSED" ]]; then
|
||||||
|
recent_versions_failed=true
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
recent_versions_failed=true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Display summary
|
||||||
|
echo ""
|
||||||
|
log "Test completed in ${duration}s"
|
||||||
|
log "Summary report: $summary_file"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "========================================"
|
||||||
|
echo " FINAL RESULTS"
|
||||||
|
echo "========================================"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Display passed versions
|
||||||
|
if [[ ${#passed_versions[@]} -gt 0 ]]; then
|
||||||
|
log_success "PASSED (${#passed_versions[@]}/${#GO_VERSIONS[@]}):"
|
||||||
|
# Sort passed versions for display
|
||||||
|
local sorted_passed=()
|
||||||
|
for version in "${sorted_versions[@]}"; do
|
||||||
|
for passed_version in "${passed_versions[@]}"; do
|
||||||
|
if [[ "$version" == "$passed_version" ]]; then
|
||||||
|
sorted_passed+=("$version")
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
done
|
||||||
|
for version in "${sorted_passed[@]}"; do
|
||||||
|
echo -e " ${GREEN}✓${NC} Go $version"
|
||||||
|
done
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Display failed versions
|
||||||
|
if [[ ${#failed_versions[@]} -gt 0 ]]; then
|
||||||
|
log_error "FAILED (${#failed_versions[@]}/${#GO_VERSIONS[@]}):"
|
||||||
|
# Sort failed versions for display
|
||||||
|
local sorted_failed=()
|
||||||
|
for version in "${sorted_versions[@]}"; do
|
||||||
|
for failed_version in "${failed_versions[@]}"; do
|
||||||
|
if [[ "$version" == "$failed_version" ]]; then
|
||||||
|
sorted_failed+=("$version")
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
done
|
||||||
|
for version in "${sorted_failed[@]}"; do
|
||||||
|
echo -e " ${RED}✗${NC} Go $version"
|
||||||
|
done
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Show failure details
|
||||||
|
echo "========================================"
|
||||||
|
echo " FAILURE DETAILS"
|
||||||
|
echo "========================================"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
for version in "${sorted_failed[@]}"; do
|
||||||
|
echo -e "${RED}--- Go $version FAILURE LOGS (last 30 lines) ---${NC}"
|
||||||
|
local result_file="${OUTPUT_DIR}/go-${version}.txt"
|
||||||
|
if [[ -f "$result_file" ]]; then
|
||||||
|
tail -n 30 "$result_file" | sed 's/^/ /'
|
||||||
|
else
|
||||||
|
echo " No log file found: $result_file"
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Display unknown versions
|
||||||
|
if [[ ${#unknown_versions[@]} -gt 0 ]]; then
|
||||||
|
log_warning "UNKNOWN (${#unknown_versions[@]}/${#GO_VERSIONS[@]}):"
|
||||||
|
for version in "${unknown_versions[@]}"; do
|
||||||
|
echo -e " ${YELLOW}?${NC} Go $version (no status file)"
|
||||||
|
done
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "========================================"
|
||||||
|
echo " COMPATIBILITY SUMMARY"
|
||||||
|
echo "========================================"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
if [[ -n "$lowest_continuous_version" ]]; then
|
||||||
|
log_success "Lowest continuous supported version: Go $lowest_continuous_version"
|
||||||
|
echo " (All versions from Go $lowest_continuous_version onwards pass)"
|
||||||
|
else
|
||||||
|
log_error "No continuous version support found"
|
||||||
|
echo " (No version has all subsequent versions passing)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "========================================"
|
||||||
|
echo "Full detailed logs available in: $OUTPUT_DIR"
|
||||||
|
echo "========================================"
|
||||||
|
|
||||||
|
# Determine exit code based on recent versions
|
||||||
|
if [[ "$recent_versions_failed" == true ]]; then
|
||||||
|
log_error "OVERALL RESULT: Recent Go versions failed - this needs attention!"
|
||||||
|
if [[ -n "$lowest_continuous_version" ]]; then
|
||||||
|
echo "Note: Continuous support starts from Go $lowest_continuous_version"
|
||||||
|
fi
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
log_success "OVERALL RESULT: Recent Go versions pass - compatibility looks good!"
|
||||||
|
if [[ -n "$lowest_continuous_version" ]]; then
|
||||||
|
echo "Continuous support starts from Go $lowest_continuous_version"
|
||||||
|
fi
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Trap to clean up on exit
|
||||||
|
cleanup() {
|
||||||
|
# Kill any remaining background processes
|
||||||
|
jobs -p | xargs -r kill 2>/dev/null || true
|
||||||
|
|
||||||
|
# Clean up any remaining Docker containers
|
||||||
|
docker ps -q --filter "name=go-toml-test-" | xargs -r docker stop 2>/dev/null || true
|
||||||
|
docker images -q --filter "reference=go-toml-test-*" | xargs -r docker rmi 2>/dev/null || true
|
||||||
|
}
|
||||||
|
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
# Run main function
|
||||||
|
main
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
//go:generate go run github.com/toml-lang/toml-test/cmd/toml-test@master -copy ./tests
|
//go:generate go run github.com/toml-lang/toml-test/cmd/toml-test@v1.6.0 -copy ./tests
|
||||||
//go:generate go run ./cmd/tomltestgen/main.go -o toml_testgen_test.go
|
//go:generate go run ./cmd/tomltestgen/main.go -r v1.6.0 -o toml_testgen_test.go
|
||||||
|
|
||||||
// This is a support file for toml_testgen_test.go
|
// This is a support file for toml_testgen_test.go
|
||||||
package toml_test
|
package toml_test
|
||||||
@@ -52,7 +52,7 @@ func testgenValid(t *testing.T, input string, jsonRef string) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
var actual interface{}
|
var actual interface{}
|
||||||
err = json.Unmarshal([]byte(j), &actual)
|
err = json.Unmarshal(j, &actual)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
testsuite.CmpJSON(t, "", ref, actual)
|
testsuite.CmpJSON(t, "", ref, actual)
|
||||||
|
|||||||
+382
-194
File diff suppressed because it is too large
Load Diff
+53
-10
@@ -12,7 +12,6 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/tracker"
|
"github.com/pelletier/go-toml/v2/internal/tracker"
|
||||||
"github.com/pelletier/go-toml/v2/unstable"
|
"github.com/pelletier/go-toml/v2/unstable"
|
||||||
)
|
)
|
||||||
@@ -416,15 +415,39 @@ func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Va
|
|||||||
|
|
||||||
return v, nil
|
return v, nil
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
elem := v.Index(v.Len() - 1)
|
// Create a new element when the slice is empty; otherwise operate on
|
||||||
|
// the last element.
|
||||||
|
var (
|
||||||
|
elem reflect.Value
|
||||||
|
created bool
|
||||||
|
)
|
||||||
|
if v.Len() == 0 {
|
||||||
|
created = true
|
||||||
|
elemType := v.Type().Elem()
|
||||||
|
if elemType.Kind() == reflect.Interface {
|
||||||
|
elem = makeMapStringInterface()
|
||||||
|
} else {
|
||||||
|
elem = reflect.New(elemType).Elem()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
elem = v.Index(v.Len() - 1)
|
||||||
|
}
|
||||||
|
|
||||||
x, err := d.handleArrayTable(key, elem)
|
x, err := d.handleArrayTable(key, elem)
|
||||||
if err != nil || d.skipUntilTable {
|
if err != nil || d.skipUntilTable {
|
||||||
return reflect.Value{}, err
|
return reflect.Value{}, err
|
||||||
}
|
}
|
||||||
if x.IsValid() {
|
if x.IsValid() {
|
||||||
elem.Set(x)
|
if created {
|
||||||
|
elem = x
|
||||||
|
} else {
|
||||||
|
elem.Set(x)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if created {
|
||||||
|
return reflect.Append(v, elem), nil
|
||||||
|
}
|
||||||
return v, err
|
return v, err
|
||||||
case reflect.Array:
|
case reflect.Array:
|
||||||
idx := d.arrayIndex(false, v)
|
idx := d.arrayIndex(false, v)
|
||||||
@@ -1010,7 +1033,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error
|
|||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
r = reflect.ValueOf(i)
|
r = reflect.ValueOf(i)
|
||||||
default:
|
default:
|
||||||
return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type()))
|
return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("integer", v.Type()))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !r.Type().AssignableTo(v.Type()) {
|
if !r.Type().AssignableTo(v.Type()) {
|
||||||
@@ -1029,7 +1052,7 @@ func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
|
|||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
v.Set(reflect.ValueOf(string(value.Data)))
|
v.Set(reflect.ValueOf(string(value.Data)))
|
||||||
default:
|
default:
|
||||||
return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type()))
|
return unstable.NewParserError(d.p.Raw(value.Raw), "%s", d.typeMismatchString("string", v.Type()))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1154,6 +1177,17 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node
|
|||||||
case reflect.Struct:
|
case reflect.Struct:
|
||||||
path, found := structFieldPath(v, string(key.Node().Data))
|
path, found := structFieldPath(v, string(key.Node().Data))
|
||||||
if !found {
|
if !found {
|
||||||
|
// If no matching struct field is found but the target implements the
|
||||||
|
// unstable.Unmarshaler interface (and it is enabled), delegate the
|
||||||
|
// decoding of this value to the custom unmarshaler.
|
||||||
|
if d.unmarshalerInterface {
|
||||||
|
if v.CanAddr() && v.Addr().CanInterface() {
|
||||||
|
if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
|
||||||
|
return reflect.Value{}, outi.UnmarshalTOML(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Otherwise, keep previous behavior and skip until the next table.
|
||||||
d.skipUntilTable = true
|
d.skipUntilTable = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -1259,13 +1293,22 @@ func fieldByIndex(v reflect.Value, path []int) reflect.Value {
|
|||||||
|
|
||||||
type fieldPathsMap = map[string][]int
|
type fieldPathsMap = map[string][]int
|
||||||
|
|
||||||
var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
|
var globalFieldPathsCache atomic.Value // map[uintptr]fieldPathsMap
|
||||||
|
|
||||||
func structFieldPath(v reflect.Value, name string) ([]int, bool) {
|
func structFieldPath(v reflect.Value, name string) ([]int, bool) {
|
||||||
t := v.Type()
|
t := v.Type()
|
||||||
|
// reflect.Type is an interface. We want to use the address of the underlying
|
||||||
|
// rtype as the key.
|
||||||
|
// This avoids using the interface as map key, which is slower.
|
||||||
|
//
|
||||||
|
// In the future this should be replaced by t.Pointer() if it becomes available.
|
||||||
|
//
|
||||||
|
// v.Type() returns a reflect.Type interface.
|
||||||
|
// reflect.ValueOf(t).Pointer() returns the address of the rtype.
|
||||||
|
tid := reflect.ValueOf(t).Pointer()
|
||||||
|
|
||||||
cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
|
cache, _ := globalFieldPathsCache.Load().(map[uintptr]fieldPathsMap)
|
||||||
fieldPaths, ok := cache[danger.MakeTypeID(t)]
|
fieldPaths, ok := cache[tid]
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
fieldPaths = map[string][]int{}
|
fieldPaths = map[string][]int{}
|
||||||
@@ -1276,8 +1319,8 @@ func structFieldPath(v reflect.Value, name string) ([]int, bool) {
|
|||||||
fieldPaths[strings.ToLower(name)] = path
|
fieldPaths[strings.ToLower(name)] = path
|
||||||
})
|
})
|
||||||
|
|
||||||
newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
|
newCache := make(map[uintptr]fieldPathsMap, len(cache)+1)
|
||||||
newCache[danger.MakeTypeID(t)] = fieldPaths
|
newCache[tid] = fieldPaths
|
||||||
for k, v := range cache {
|
for k, v := range cache {
|
||||||
newCache[k] = v
|
newCache[k] = v
|
||||||
}
|
}
|
||||||
|
|||||||
+251
-7
@@ -412,7 +412,7 @@ foo = "bar"`,
|
|||||||
assert: func(t *testing.T, test test) {
|
assert: func(t *testing.T, test test) {
|
||||||
// Despite the documentation:
|
// Despite the documentation:
|
||||||
// Pointer variable equality is determined based on the equality of the
|
// Pointer variable equality is determined based on the equality of the
|
||||||
// referenced values (as opposed to the memory addresses).
|
// referenced values (as opposed to the memory addresses).
|
||||||
// assert.Equal does not work properly with maps with pointer keys
|
// assert.Equal does not work properly with maps with pointer keys
|
||||||
// https://github.com/stretchr/testify/issues/1143
|
// https://github.com/stretchr/testify/issues/1143
|
||||||
expected := make(map[unmarshalTextKey]string)
|
expected := make(map[unmarshalTextKey]string)
|
||||||
@@ -2977,7 +2977,8 @@ func TestIssue931(t *testing.T) {
|
|||||||
Name = 'd'
|
Name = 'd'
|
||||||
`)
|
`)
|
||||||
|
|
||||||
toml.Unmarshal(b, &its)
|
err := toml.Unmarshal(b, &its)
|
||||||
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, items{[]item{{"c"}, {"d"}}}, its)
|
assert.Equal(t, items{[]item{{"c"}, {"d"}}}, its)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2998,7 +2999,8 @@ func TestIssue931Interface(t *testing.T) {
|
|||||||
Name = 'd'
|
Name = 'd'
|
||||||
`)
|
`)
|
||||||
|
|
||||||
toml.Unmarshal(b, &its)
|
err := toml.Unmarshal(b, &its)
|
||||||
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, items{[]interface{}{item{"Name": "c"}, item{"Name": "d"}}}, its)
|
assert.Equal(t, items{[]interface{}{item{"Name": "c"}, item{"Name": "d"}}}, its)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3024,7 +3026,8 @@ func TestIssue931SliceInterface(t *testing.T) {
|
|||||||
Name = 'd'
|
Name = 'd'
|
||||||
`)
|
`)
|
||||||
|
|
||||||
toml.Unmarshal(b, &its)
|
err := toml.Unmarshal(b, &its)
|
||||||
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, items{[]interface{}{item{"Name": "c"}, item{"Name": "d"}}}, its)
|
assert.Equal(t, items{[]interface{}{item{"Name": "c"}, item{"Name": "d"}}}, its)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3884,9 +3887,9 @@ func TestUnmarshal_Nil(t *testing.T) {
|
|||||||
{
|
{
|
||||||
desc: "simplest",
|
desc: "simplest",
|
||||||
input: `
|
input: `
|
||||||
[foo]
|
[foo]
|
||||||
[foo.foo]
|
[foo.foo]
|
||||||
`,
|
`,
|
||||||
expected: "[foo]\n[foo.foo]\n",
|
expected: "[foo]\n[foo.foo]\n",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -3998,3 +4001,244 @@ foo = "bar"`,
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type doc994 struct{}
|
||||||
|
|
||||||
|
func (d *doc994) UnmarshalTOML(value *unstable.Node) error {
|
||||||
|
return errors.New("expected-error")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue994(t *testing.T) {
|
||||||
|
var _ unstable.Unmarshaler = (*doc994)(nil)
|
||||||
|
tomlBytes := []byte(`foo = "bar"`)
|
||||||
|
var d doc994
|
||||||
|
err := toml.NewDecoder(bytes.NewReader(tomlBytes)).
|
||||||
|
EnableUnmarshalerInterface().
|
||||||
|
Decode(&d)
|
||||||
|
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
if err.Error() != "expected-error" {
|
||||||
|
t.Fatalf("expected error 'expected-error', got '%s'", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type doc994ok struct {
|
||||||
|
S string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *doc994ok) UnmarshalTOML(value *unstable.Node) error {
|
||||||
|
d.S = string(value.Data) + " from unmarshaler"
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue994_OK(t *testing.T) {
|
||||||
|
var _ unstable.Unmarshaler = (*doc994ok)(nil)
|
||||||
|
tomlBytes := []byte(`foo = "bar"`)
|
||||||
|
var d doc994ok
|
||||||
|
err := toml.NewDecoder(bytes.NewReader(tomlBytes)).
|
||||||
|
EnableUnmarshalerInterface().
|
||||||
|
Decode(&d)
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "bar from unmarshaler", d.S)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue995(t *testing.T) {
|
||||||
|
type AllowList struct {
|
||||||
|
Description string
|
||||||
|
Condition string
|
||||||
|
Commits []string
|
||||||
|
Paths []string
|
||||||
|
RegexTarget string
|
||||||
|
Regexes []string
|
||||||
|
StopWords []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type Rule struct {
|
||||||
|
ID string
|
||||||
|
Description string
|
||||||
|
Regex string
|
||||||
|
SecretGroup int
|
||||||
|
Entropy interface{}
|
||||||
|
Keywords []string
|
||||||
|
Path string
|
||||||
|
Tags []string
|
||||||
|
AllowList *AllowList
|
||||||
|
Allowlists []AllowList
|
||||||
|
}
|
||||||
|
|
||||||
|
type GitleaksConfig struct {
|
||||||
|
Description string
|
||||||
|
Rules []Rule
|
||||||
|
Allowlist struct {
|
||||||
|
Commits []string
|
||||||
|
Paths []string
|
||||||
|
RegexTarget string
|
||||||
|
Regexes []string
|
||||||
|
StopWords []string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := `
|
||||||
|
[[allowlists]]
|
||||||
|
description = "Exception for File "
|
||||||
|
files = [ '''app/src''']
|
||||||
|
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "policies"
|
||||||
|
regexes = [
|
||||||
|
'''abc'''
|
||||||
|
]
|
||||||
|
`
|
||||||
|
|
||||||
|
var cfg GitleaksConfig
|
||||||
|
err := toml.Unmarshal([]byte(doc), &cfg)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Ensure no panic and that nested array table was created.
|
||||||
|
if len(cfg.Rules) == 0 {
|
||||||
|
t.Fatalf("expected Rules to contain at least one element after unmarshaling nested array table")
|
||||||
|
}
|
||||||
|
if len(cfg.Rules[0].Allowlists) != 1 {
|
||||||
|
t.Fatalf("expected first Rule to have exactly one allowlists entry, got %d", len(cfg.Rules[0].Allowlists))
|
||||||
|
}
|
||||||
|
assert.Equal(t, "policies", cfg.Rules[0].Allowlists[0].Description)
|
||||||
|
assert.Equal(t, []string{"abc"}, cfg.Rules[0].Allowlists[0].Regexes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue995_InterfaceSlice_MultiNested(t *testing.T) {
|
||||||
|
type Root struct {
|
||||||
|
Rules []interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := `
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "a"
|
||||||
|
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "b"
|
||||||
|
`
|
||||||
|
|
||||||
|
var r Root
|
||||||
|
err := toml.Unmarshal([]byte(doc), &r)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
if len(r.Rules) != 1 {
|
||||||
|
t.Fatalf("expected one element in Rules, got %d", len(r.Rules))
|
||||||
|
}
|
||||||
|
|
||||||
|
m, ok := r.Rules[0].(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected Rules[0] to be a map[string]any, got %T", r.Rules[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
als, ok := m["allowlists"].([]interface{})
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected allowlists to be []any, got %T", m["allowlists"])
|
||||||
|
}
|
||||||
|
if len(als) != 2 {
|
||||||
|
t.Fatalf("expected 2 allowlists entries, got %d", len(als))
|
||||||
|
}
|
||||||
|
|
||||||
|
a0, ok := als[0].(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected allowlists[0] to be map[string]any, got %T", als[0])
|
||||||
|
}
|
||||||
|
a1, ok := als[1].(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected allowlists[1] to be map[string]any, got %T", als[1])
|
||||||
|
}
|
||||||
|
assert.Equal(t, "a", a0["description"])
|
||||||
|
assert.Equal(t, "b", a1["description"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue995_MultiNestedConcrete(t *testing.T) {
|
||||||
|
type AllowList struct {
|
||||||
|
Description string
|
||||||
|
}
|
||||||
|
type Rule struct {
|
||||||
|
Allowlists []AllowList
|
||||||
|
}
|
||||||
|
type Root struct {
|
||||||
|
Rules []Rule
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := `
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "a"
|
||||||
|
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "b"
|
||||||
|
`
|
||||||
|
|
||||||
|
var r Root
|
||||||
|
err := toml.Unmarshal([]byte(doc), &r)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
if len(r.Rules) != 1 {
|
||||||
|
t.Fatalf("expected one element in Rules, got %d", len(r.Rules))
|
||||||
|
}
|
||||||
|
assert.Equal(t, 2, len(r.Rules[0].Allowlists))
|
||||||
|
assert.Equal(t, "a", r.Rules[0].Allowlists[0].Description)
|
||||||
|
assert.Equal(t, "b", r.Rules[0].Allowlists[1].Description)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue995_PointerToSlice_Rules(t *testing.T) {
|
||||||
|
type AllowList struct{ Description string }
|
||||||
|
type Rule struct{ Allowlists []AllowList }
|
||||||
|
type Root struct{ Rules *[]Rule }
|
||||||
|
|
||||||
|
doc := `
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "a"
|
||||||
|
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "b"
|
||||||
|
`
|
||||||
|
|
||||||
|
var r Root
|
||||||
|
err := toml.Unmarshal([]byte(doc), &r)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if r.Rules == nil {
|
||||||
|
t.Fatalf("expected Rules pointer to be initialized")
|
||||||
|
}
|
||||||
|
if len(*r.Rules) != 1 {
|
||||||
|
t.Fatalf("expected one element in Rules, got %d", len(*r.Rules))
|
||||||
|
}
|
||||||
|
rule := (*r.Rules)[0]
|
||||||
|
assert.Equal(t, 2, len(rule.Allowlists))
|
||||||
|
assert.Equal(t, "a", rule.Allowlists[0].Description)
|
||||||
|
assert.Equal(t, "b", rule.Allowlists[1].Description)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue995_SliceNonEmpty_UsesLastElement(t *testing.T) {
|
||||||
|
type AllowList struct{ Description string }
|
||||||
|
type Rule struct{ Allowlists []AllowList }
|
||||||
|
type Root struct{ Rules []Rule }
|
||||||
|
|
||||||
|
// Pre-initialize with one Rule; nested array table should populate
|
||||||
|
// the last element, not create a new one at this level.
|
||||||
|
var r Root
|
||||||
|
r.Rules = []Rule{{}}
|
||||||
|
|
||||||
|
doc := `
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "a"
|
||||||
|
|
||||||
|
[[rules.allowlists]]
|
||||||
|
description = "b"
|
||||||
|
`
|
||||||
|
|
||||||
|
err := toml.Unmarshal([]byte(doc), &r)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if len(r.Rules) != 1 {
|
||||||
|
t.Fatalf("expected one element in Rules, got %d", len(r.Rules))
|
||||||
|
}
|
||||||
|
assert.Equal(t, 2, len(r.Rules[0].Allowlists))
|
||||||
|
// Values presence check
|
||||||
|
got := []string{r.Rules[0].Allowlists[0].Description, r.Rules[0].Allowlists[1].Description}
|
||||||
|
if !(got[0] == "a" && got[1] == "b") && !(got[0] == "b" && got[1] == "a") {
|
||||||
|
t.Fatalf("unexpected values in allowlists: %v", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
+7
-22
@@ -2,9 +2,6 @@ package unstable
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Iterator over a sequence of nodes.
|
// Iterator over a sequence of nodes.
|
||||||
@@ -37,7 +34,7 @@ func (c *Iterator) Next() bool {
|
|||||||
// IsLast returns true if the current node of the iterator is the last
|
// IsLast returns true if the current node of the iterator is the last
|
||||||
// one. Subsequent calls to Next() will return false.
|
// one. Subsequent calls to Next() will return false.
|
||||||
func (c *Iterator) IsLast() bool {
|
func (c *Iterator) IsLast() bool {
|
||||||
return c.node.next == 0
|
return c.node.next == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Node returns a pointer to the node pointed at by the iterator.
|
// Node returns a pointer to the node pointed at by the iterator.
|
||||||
@@ -65,11 +62,9 @@ type Node struct {
|
|||||||
Raw Range // Raw bytes from the input.
|
Raw Range // Raw bytes from the input.
|
||||||
Data []byte // Node value (either allocated or referencing the input).
|
Data []byte // Node value (either allocated or referencing the input).
|
||||||
|
|
||||||
// References to other nodes, as offsets in the backing array
|
// References to other nodes.
|
||||||
// from this node. References can go backward, so those can be
|
next *Node // nil if last element
|
||||||
// negative.
|
child *Node // nil if no child
|
||||||
next int // 0 if last element
|
|
||||||
child int // 0 if no child
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Range of bytes in the document.
|
// Range of bytes in the document.
|
||||||
@@ -80,24 +75,14 @@ type Range struct {
|
|||||||
|
|
||||||
// Next returns a pointer to the next node, or nil if there is no next node.
|
// Next returns a pointer to the next node, or nil if there is no next node.
|
||||||
func (n *Node) Next() *Node {
|
func (n *Node) Next() *Node {
|
||||||
if n.next == 0 {
|
return n.next
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ptr := unsafe.Pointer(n)
|
|
||||||
size := unsafe.Sizeof(Node{})
|
|
||||||
return (*Node)(danger.Stride(ptr, size, n.next))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Child returns a pointer to the first child node of this node. Other children
|
// Child returns a pointer to the first child node of this node. Other children
|
||||||
// can be accessed calling Next on the first child. Returns an nil if this Node
|
// can be accessed calling Next on the first child. Returns nil if this Node
|
||||||
// has no child.
|
// has no child.
|
||||||
func (n *Node) Child() *Node {
|
func (n *Node) Child() *Node {
|
||||||
if n.child == 0 {
|
return n.child
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ptr := unsafe.Pointer(n)
|
|
||||||
size := unsafe.Sizeof(Node{})
|
|
||||||
return (*Node)(danger.Stride(ptr, size, n.child))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Valid returns true if the node's kind is set (not to Invalid).
|
// Valid returns true if the node's kind is set (not to Invalid).
|
||||||
|
|||||||
+79
-29
@@ -4,68 +4,118 @@ package unstable
|
|||||||
//
|
//
|
||||||
// It is immutable once constructed with Builder.
|
// It is immutable once constructed with Builder.
|
||||||
type root struct {
|
type root struct {
|
||||||
nodes []Node
|
first *Node
|
||||||
}
|
}
|
||||||
|
|
||||||
// Iterator over the top level nodes.
|
// Iterator over the top level nodes.
|
||||||
func (r *root) Iterator() Iterator {
|
func (r *root) Iterator() Iterator {
|
||||||
it := Iterator{}
|
return Iterator{node: r.first}
|
||||||
if len(r.nodes) > 0 {
|
|
||||||
it.node = &r.nodes[0]
|
|
||||||
}
|
|
||||||
return it
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *root) at(idx reference) *Node {
|
type reference struct {
|
||||||
return &r.nodes[idx]
|
*Node
|
||||||
}
|
}
|
||||||
|
|
||||||
type reference int
|
var invalidReference = reference{}
|
||||||
|
|
||||||
const invalidReference reference = -1
|
|
||||||
|
|
||||||
func (r reference) Valid() bool {
|
func (r reference) Valid() bool {
|
||||||
return r != invalidReference
|
return r.Node != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type builder struct {
|
type builder struct {
|
||||||
tree root
|
// chunks of nodes. Pointers to nodes are stable because we only append
|
||||||
lastIdx int
|
// to the last chunk, and chunks are allocated with fixed capacity.
|
||||||
|
chunks [][]Node
|
||||||
|
// current chunk index
|
||||||
|
chunkIdx int
|
||||||
|
|
||||||
|
// root node of the tree
|
||||||
|
root root
|
||||||
|
|
||||||
|
// last pushed node (for chaining)
|
||||||
|
last *Node
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const initialChunkSize = 16
|
||||||
|
const maxChunkSize = 2048
|
||||||
|
|
||||||
func (b *builder) Tree() *root {
|
func (b *builder) Tree() *root {
|
||||||
return &b.tree
|
return &b.root
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) NodeAt(ref reference) *Node {
|
func (b *builder) NodeAt(ref reference) *Node {
|
||||||
return b.tree.at(ref)
|
return ref.Node
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) Reset() {
|
func (b *builder) Reset() {
|
||||||
b.tree.nodes = b.tree.nodes[:0]
|
b.chunkIdx = 0
|
||||||
b.lastIdx = 0
|
for i := range b.chunks {
|
||||||
|
b.chunks[i] = b.chunks[i][:0]
|
||||||
|
}
|
||||||
|
b.root.first = nil
|
||||||
|
b.last = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *builder) ensureCapacity() {
|
||||||
|
if b.chunkIdx >= len(b.chunks) {
|
||||||
|
size := initialChunkSize
|
||||||
|
if len(b.chunks) > 0 {
|
||||||
|
lastCap := cap(b.chunks[len(b.chunks)-1])
|
||||||
|
size = lastCap * 2
|
||||||
|
if size > maxChunkSize {
|
||||||
|
size = maxChunkSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.chunks = append(b.chunks, make([]Node, 0, size))
|
||||||
|
}
|
||||||
|
if len(b.chunks[b.chunkIdx]) == cap(b.chunks[b.chunkIdx]) {
|
||||||
|
b.chunkIdx++
|
||||||
|
if b.chunkIdx >= len(b.chunks) {
|
||||||
|
size := initialChunkSize
|
||||||
|
if len(b.chunks) > 0 {
|
||||||
|
lastCap := cap(b.chunks[len(b.chunks)-1])
|
||||||
|
size = lastCap * 2
|
||||||
|
if size > maxChunkSize {
|
||||||
|
size = maxChunkSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.chunks = append(b.chunks, make([]Node, 0, size))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *builder) push(n Node) *Node {
|
||||||
|
b.ensureCapacity()
|
||||||
|
chunk := &b.chunks[b.chunkIdx]
|
||||||
|
*chunk = append(*chunk, n)
|
||||||
|
return &(*chunk)[len(*chunk)-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) Push(n Node) reference {
|
func (b *builder) Push(n Node) reference {
|
||||||
b.lastIdx = len(b.tree.nodes)
|
ptr := b.push(n)
|
||||||
b.tree.nodes = append(b.tree.nodes, n)
|
if b.root.first == nil {
|
||||||
return reference(b.lastIdx)
|
b.root.first = ptr
|
||||||
|
}
|
||||||
|
b.last = ptr
|
||||||
|
return reference{ptr}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) PushAndChain(n Node) reference {
|
func (b *builder) PushAndChain(n Node) reference {
|
||||||
newIdx := len(b.tree.nodes)
|
ptr := b.push(n)
|
||||||
b.tree.nodes = append(b.tree.nodes, n)
|
if b.root.first == nil {
|
||||||
if b.lastIdx >= 0 {
|
b.root.first = ptr
|
||||||
b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
|
|
||||||
}
|
}
|
||||||
b.lastIdx = newIdx
|
if b.last != nil {
|
||||||
return reference(b.lastIdx)
|
b.last.next = ptr
|
||||||
|
}
|
||||||
|
b.last = ptr
|
||||||
|
return reference{ptr}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) AttachChild(parent reference, child reference) {
|
func (b *builder) AttachChild(parent reference, child reference) {
|
||||||
b.tree.nodes[parent].child = int(child) - int(parent)
|
parent.child = child.Node
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *builder) Chain(from reference, to reference) {
|
func (b *builder) Chain(from reference, to reference) {
|
||||||
b.tree.nodes[from].next = int(to) - int(from)
|
from.next = to.Node
|
||||||
}
|
}
|
||||||
|
|||||||
+3
-4
@@ -6,7 +6,6 @@ import (
|
|||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/pelletier/go-toml/v2/internal/characters"
|
"github.com/pelletier/go-toml/v2/internal/characters"
|
||||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ParserError describes an error relative to the content of the document.
|
// ParserError describes an error relative to the content of the document.
|
||||||
@@ -70,7 +69,7 @@ func (p *Parser) Data() []byte {
|
|||||||
// panics.
|
// panics.
|
||||||
func (p *Parser) Range(b []byte) Range {
|
func (p *Parser) Range(b []byte) Range {
|
||||||
return Range{
|
return Range{
|
||||||
Offset: uint32(danger.SubsliceOffset(p.data, b)),
|
Offset: uint32(cap(p.data) - cap(b)),
|
||||||
Length: uint32(len(b)),
|
Length: uint32(len(b)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -159,7 +158,7 @@ type Shape struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Parser) position(b []byte) Position {
|
func (p *Parser) position(b []byte) Position {
|
||||||
offset := danger.SubsliceOffset(p.data, b)
|
offset := cap(p.data) - cap(b)
|
||||||
|
|
||||||
lead := p.data[:offset]
|
lead := p.data[:offset]
|
||||||
|
|
||||||
@@ -1076,7 +1075,7 @@ byteLoop:
|
|||||||
}
|
}
|
||||||
case c == 'T' || c == 't' || c == ':' || c == '.':
|
case c == 'T' || c == 't' || c == ':' || c == '.':
|
||||||
hasTime = true
|
hasTime = true
|
||||||
case c == '+' || c == '-' || c == 'Z' || c == 'z':
|
case c == '+' || c == 'Z' || c == 'z':
|
||||||
hasTz = true
|
hasTz = true
|
||||||
case c == ' ':
|
case c == ' ':
|
||||||
if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) {
|
if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) {
|
||||||
|
|||||||
Reference in New Issue
Block a user