diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 47aba6f24..f7d96f968 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements run: | @@ -28,7 +28,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements run: | @@ -46,7 +46,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements shell: pwsh @@ -57,4 +57,4 @@ jobs: - name: Releasing env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: make github-release-windows \ No newline at end of file + run: make github-release-windows diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1fbedb1ae..fbcd10f8e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,11 +6,11 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.50.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.52.1 go mod vendor - name: Checking Format and Testing run: make check @@ -23,11 +23,11 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.50.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.52.1 go mod vendor - name: Checking Format and Testing run: make check @@ -40,12 +40,12 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - uses: actions/checkout@v3 - name: Install Requirements run: | choco install make - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.1 go mod vendor - name: Testing run: | diff --git a/.goreleaser-darwin.yml b/.goreleaser-darwin.yml index 7c06a6c87..db4f07b17 100644 --- a/.goreleaser-darwin.yml +++ b/.goreleaser-darwin.yml @@ -88,6 +88,18 @@ builds: main: ./cmd/dmsgpty-cli/ ldflags: -s -w -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + - id: dmsgweb + binary: dmsgweb + goos: + - darwin + goarch: + - arm64 + - amd64 + env: + - CGO_ENABLED=0 + main: ./cmd/dmsgweb/ + ldflags: -s -w -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + archives: - id: archive format: tar.gz @@ -100,6 +112,7 @@ archives: - dmsgpty-host - dmsgcurl - dmsgpty-cli + - dmsgweb allow_different_binary_count: true checksum: diff --git a/.goreleaser-linux.yml b/.goreleaser-linux.yml index 3f4f3b719..ce52ac433 100644 --- a/.goreleaser-linux.yml +++ 
b/.goreleaser-linux.yml @@ -329,6 +329,58 @@ builds: main: ./cmd/dmsgpty-host/ ldflags: -s -w -linkmode external -extldflags '-static' -buildid= -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + - id: dmsgweb-amd64 + binary: dmsgweb + goos: + - linux + goarch: + - amd64 + env: + - CGO_ENABLED=1 + - CC=/home/runner/work/dmsg/dmsg/musl-data/x86_64-linux-musl-cross/bin/x86_64-linux-musl-gcc + main: ./cmd/dmsgweb/ + ldflags: -s -w -linkmode external -extldflags '-static' -buildid= -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + + - id: dmsgweb-arm64 + binary: dmsgweb + goos: + - linux + goarch: + - arm64 + env: + - CGO_ENABLED=1 + - CC=/home/runner/work/dmsg/dmsg/musl-data/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc + main: ./cmd/dmsgweb/ + ldflags: -s -w -linkmode external -extldflags '-static' -buildid= -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + + - id: dmsgweb-arm + binary: dmsgweb + goos: + - linux + goarch: + - arm + goarm: + - 6 + env: + - CGO_ENABLED=1 + - CC=/home/runner/work/dmsg/dmsg/musl-data/arm-linux-musleabi-cross/bin/arm-linux-musleabi-gcc + main: ./cmd/dmsgweb/ + ldflags: -s -w -linkmode external -extldflags '-static' -buildid= -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + + - id: dmsgweb-armhf + binary: dmsgweb + goos: + - linux + goarch: + - arm + goarm: + - 7 + env: + - CGO_ENABLED=1 + - CC=/home/runner/work/dmsg/dmsg/musl-data/arm-linux-musleabihf-cross/bin/arm-linux-musleabihf-gcc + main: ./cmd/dmsgweb/ + ldflags: -s -w -linkmode external -extldflags '-static' -buildid= -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + archives: - id: amd64 format: tar.gz @@ -341,6 +393,7 @@ archives: - dmsgpty-cli-amd64 - dmsgcurl-amd64 - dmsgpty-host-amd64 + - dmsgweb-amd64 - id: arm64 format: tar.gz @@ -353,6 +406,7 @@ archives: - dmsgpty-cli-arm64 - dmsgcurl-arm64 - dmsgpty-host-arm64 + - dmsgweb-arm64 - id: arm format: tar.gz @@ -365,6 +419,7 @@ archives: - dmsgpty-cli-arm - dmsgcurl-arm - dmsgpty-host-arm + - dmsgweb-arm - id: armhf format: tar.gz @@ -377,6 +432,7 @@ archives: - dmsgpty-cli-armhf - dmsgcurl-armhf - dmsgpty-host-armhf + - dmsgweb-armhf checksum: name_template: 'checksums.txt' diff --git a/.goreleaser-windows.yml b/.goreleaser-windows.yml index 246420e1b..5bc3e61c2 100644 --- a/.goreleaser-windows.yml +++ b/.goreleaser-windows.yml @@ -87,6 +87,18 @@ builds: main: ./cmd/dmsgpty-host/ ldflags: -s -w -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + - id: dmsgweb + binary: dmsgweb + goos: + - windows + goarch: + - amd64 + 
- 386 + env: + - CGO_ENABLED=0 + main: ./cmd/dmsgweb/ + ldflags: -s -w -X github.com/skycoin/skywire-utilities/pkg/buildinfo.version=v{{.Version}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.commit={{.ShortCommit}} -X github.com/skycoin/skywire-utilities/pkg/buildinfo.date={{.Date}} + archives: - id: archive format: zip diff --git a/CHANGELOG.md b/CHANGELOG.md index da19db275..1e282c144 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,26 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +Updates may be generated with scripts/changelog.sh + +## 1.3.14 + +### Added +- add `dmsgweb` as a new tool to the release + +### Changed +- change `dmsgget` to `dmsgcurl` with new functionality + +### Commits +- update skywire-utilities [#244](https://github.com/skycoin/dmsg/pull/244) +- add ConnectedServersPK method [#243](https://github.com/skycoin/dmsg/pull/243) +- improve logic on save file dmsgcurl [#242](https://github.com/skycoin/dmsg/pull/242) +- dmsgcurl [#238](https://github.com/skycoin/dmsg/pull/238) +- dmsg client using socks5 proxy basic example [#237](https://github.com/skycoin/dmsg/pull/237) +- Bump Go images for Docker to 1.20-alpine [#235](https://github.com/skycoin/dmsg/pull/235) +- Export RootCmds [#234](https://github.com/skycoin/dmsg/pull/234) +- Dmsgweb [#229](https://github.com/skycoin/dmsg/pull/229) + ## 1.3.0 @@ -12,4 +32,4 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - add `gen` command to generate config, with two flag `-o` for output file and `-t` for using test env values ### Changed -- switch from AppVeyor to Github Action in CI process \ No newline at end of file +- switch from AppVeyor to Github Action in CI process diff --git a/cmd/dmsg-discovery/commands/root.go b/cmd/dmsg-discovery/commands/root.go index 5ba643670..9e3e3838a 100644 --- a/cmd/dmsg-discovery/commands/root.go +++ b/cmd/dmsg-discovery/commands/root.go @@ -153,6 +153,13 @@ var RootCmd = &cobra.Command{ defer closeDmsgDC() + go func() { + for { + a.DmsgServers = dmsgDC.ConnectedServersPK() + time.Sleep(time.Second) + } + }() + go updateServers(ctx, a, dClient, dmsgDC, log) go func() { diff --git a/cmd/dmsgweb/commands/dmsgweb.go b/cmd/dmsgweb/commands/dmsgweb.go index bdad1a58e..9571df26a 100644 --- a/cmd/dmsgweb/commands/dmsgweb.go +++ b/cmd/dmsgweb/commands/dmsgweb.go @@ -64,7 +64,6 @@ func (r *customResolver) Resolve(ctx context.Context, name string) (context.Cont var ( httpC http.Client - httpClient http.Client dmsgDisc string dmsgSessions int filterDomainSuffix string @@ -140,27 +139,19 @@ var RootCmd = &cobra.Command{ pk, sk = cipher.GenerateKeyPair() } - if addProxy != "" { - // Configure SOCKS5 proxy dialer - dialer, err := proxy.SOCKS5("tcp", addProxy, nil, proxy.Direct) - if err != nil { - log.Fatalf("Error creating SOCKS5 dialer: %v", err) - } - // Configure custom HTTP transport with SOCKS5 proxy - // Configure HTTP client with custom transport - httpClient = http.Client{ - Transport: &http.Transport{ - Dial: dialer.Dial, - }, - } - } - dmsgC, closeDmsg, err := startDmsg(ctx, pk, sk) if err != nil { dmsgWebLog.WithError(err).Fatal("failed to start dmsg") } defer closeDmsg() + go func() { + <-ctx.Done() + cancel() + closeDmsg() + os.Exit(0) //this should not be necessary + }() + httpC = http.Client{Transport: dmsghttp.MakeHTTPTransport(ctx, dmsgC)} // Create a
SOCKS5 server with custom name resolution @@ -262,10 +253,6 @@ var RootCmd = &cobra.Command{ wg.Done() }() wg.Wait() - os.Exit(0) //this should not be necessary - // <-ctx.Done() - cancel() - closeDmsg() }, } diff --git a/docker/images/dmsg-discovery/Dockerfile b/docker/images/dmsg-discovery/Dockerfile index 39deacd40..464904c41 100755 --- a/docker/images/dmsg-discovery/Dockerfile +++ b/docker/images/dmsg-discovery/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.21-alpine AS builder ARG CGO_ENABLED=0 ENV CGO_ENABLED=${CGO_ENABLED} \ diff --git a/docker/images/dmsg-server/Dockerfile b/docker/images/dmsg-server/Dockerfile index 7962c681c..42ed39334 100755 --- a/docker/images/dmsg-server/Dockerfile +++ b/docker/images/dmsg-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.21-alpine AS builder ARG CGO_ENABLED=0 ENV CGO_ENABLED=${CGO_ENABLED} \ diff --git a/examples/proxified/main.go b/examples/proxified/main.go index 2a1fb9b5b..710eb1ed4 100644 --- a/examples/proxified/main.go +++ b/examples/proxified/main.go @@ -3,15 +3,17 @@ package main import ( "context" - "time" "net/http" - "github.com/skycoin/skywire-utilities/pkg/skyenv" + "time" + "github.com/skycoin/skywire-utilities/pkg/logging" + "github.com/skycoin/skywire-utilities/pkg/skyenv" "github.com/skycoin/skywire-utilities/pkg/cipher" + "golang.org/x/net/proxy" + "github.com/skycoin/dmsg/pkg/disc" dmsg "github.com/skycoin/dmsg/pkg/dmsg" - "golang.org/x/net/proxy" ) func main() { diff --git a/go.mod b/go.mod index 8a40f3b87..29d6cb0d2 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/skycoin/dmsg -go 1.18 +go 1.21 + +toolchain go1.21.3 require ( github.com/ActiveState/termtest/conpty v0.5.0 @@ -17,7 +19,7 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/skycoin/noise v0.0.0-20180327030543-2492fe189ae6 github.com/skycoin/skycoin v0.27.1 - github.com/skycoin/skywire-utilities v0.0.0-20231120175000-12be4345eb26 + github.com/skycoin/skywire-utilities v1.3.14 github.com/spf13/cobra v1.4.0 github.com/stretchr/testify v1.8.3 golang.org/x/net v0.10.0 diff --git a/go.sum b/go.sum index a10d5e66f..e7603075d 100644 --- a/go.sum +++ b/go.sum @@ -26,6 +26,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= @@ -35,6 +36,7 @@ github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SU github.com/go-chi/chi/v5 v5.0.8-0.20220103230436-7dbe9a0bd10f h1:6kLofhLkWj7lgCc+mvcVLnwhTzQYgL/yW/Y0e/JYwjg= github.com/go-chi/chi/v5 v5.0.8-0.20220103230436-7dbe9a0bd10f/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales 
v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= @@ -100,8 +102,11 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8= @@ -119,8 +124,8 @@ github.com/skycoin/noise v0.0.0-20180327030543-2492fe189ae6 h1:1Nc5EBY6pjfw1kwW0 github.com/skycoin/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:UXghlricA7J3aRD/k7p/zBObQfmBawwCxIVPVjz2Q3o= github.com/skycoin/skycoin v0.27.1 h1:HatxsRwVSPaV4qxH6290xPBmkH/HgiuAoY2qC+e8C9I= github.com/skycoin/skycoin v0.27.1/go.mod h1:78nHjQzd8KG0jJJVL/j0xMmrihXi70ti63fh8vXScJw= -github.com/skycoin/skywire-utilities v0.0.0-20231120175000-12be4345eb26 h1:5eFvqawoCK9vZnDlwDbOPUUy1Fxuw1Y0KPe03gwmdhs= -github.com/skycoin/skywire-utilities v0.0.0-20231120175000-12be4345eb26/go.mod h1:X5H+fKC3rD11/sm4t9V2FWy/aet7OdEilaO2Ar3waXY= +github.com/skycoin/skywire-utilities v1.3.14 h1:AzTV3oiij7b2VgpiZHJj/oy4Tojf22I+r50Riza8Xt0= +github.com/skycoin/skywire-utilities v1.3.14/go.mod h1:yFKWpL1bDRPKU3uK+cTF4PnYUMe+eyIj5N2bk4sF5Cw= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -179,6 +184,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/dmsg-discovery/api/api.go b/internal/dmsg-discovery/api/api.go index 8ac21b3fc..2032cc7db 100644 --- a/internal/dmsg-discovery/api/api.go +++ b/internal/dmsg-discovery/api/api.go @@ -43,6 +43,7 @@ type API struct { startedAt time.Time enableLoadTesting bool dmsgAddr string + DmsgServers []string } // New returns a new API object, which can be started as a server @@ -65,6 +66,7 @@ func New(log logrus.FieldLogger, db store.Storer, m discmetrics.Metrics, testMod enableLoadTesting: 
enableLoadTesting, reqsInFlightCountMiddleware: metricsutil.NewRequestsInFlightCountMiddleware(), dmsgAddr: dmsgAddr, + DmsgServers: []string{}, } r.Use(middleware.RequestID) @@ -415,9 +417,10 @@ func (a *API) getAllServers() http.HandlerFunc { func (a *API) serviceHealth(w http.ResponseWriter, r *http.Request) { info := buildinfo.Get() a.writeJSON(w, r, http.StatusOK, httputil.HealthCheckResponse{ - BuildInfo: info, - StartedAt: a.startedAt, - DmsgAddr: a.dmsgAddr, + BuildInfo: info, + StartedAt: a.startedAt, + DmsgAddr: a.dmsgAddr, + DmsgServers: a.DmsgServers, }) } diff --git a/scripts/changelog.sh b/scripts/changelog.sh new file mode 100644 index 000000000..35cdb5ffe --- /dev/null +++ b/scripts/changelog.sh @@ -0,0 +1,13 @@ +#!/usr/bin/bash +## CHANGELOG GENERATOR SCRIPT +# supply range of pull requests since last release as arguments for sequence +[[ $1 == "" ]] && cat $0 && exit +for _i in $(seq $1 $2 | tac) ; do +_merged="$(curl -s https://github.com/skycoin/dmsg/pull/${_i} | grep 'Status: Merged')" +if [[ $_merged != "" ]] ; then +_title="$(curl -s https://github.com/skycoin/dmsg/pull/${_i} | grep '<title>')" +_title=${_title//"<title>"/} +_title=${_title//"by"*/} +[[ ${_title} != "" ]] && echo "- ${_title} [#${_i}](https://github.com/skycoin/dmsg/pull/${_i})" +fi +done diff --git a/vendor/github.com/ActiveState/termtest/conpty/term_other.go b/vendor/github.com/ActiveState/termtest/conpty/term_other.go index daef1c079..834d06230 100644 --- a/vendor/github.com/ActiveState/termtest/conpty/term_other.go +++ b/vendor/github.com/ActiveState/termtest/conpty/term_other.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package conpty diff --git a/vendor/github.com/ActiveState/termtest/conpty/term_windows.go b/vendor/github.com/ActiveState/termtest/conpty/term_windows.go index df091b5bb..b4d2ad00d 100644 --- a/vendor/github.com/ActiveState/termtest/conpty/term_windows.go +++ b/vendor/github.com/ActiveState/termtest/conpty/term_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package conpty diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go index a67327972..3195074fd 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go index 6055e33b9..2f297601b 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go index cbec8f728..644d8b2b4 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go index 3ee06ea72..6b4b8a1ef 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package
winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go index 244b5fa25..1298544a3 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go index 2d27fa1d0..03ab280c1 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go index afa7635d7..3535349f0 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 2d40fb75a..1e19ea0c3 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winterm diff --git a/vendor/github.com/VictoriaMetrics/metrics/counter.go b/vendor/github.com/VictoriaMetrics/metrics/counter.go index a7d954923..dfe947794 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/counter.go +++ b/vendor/github.com/VictoriaMetrics/metrics/counter.go @@ -11,9 +11,9 @@ import ( // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned counter is safe to use from concurrent goroutines. func NewCounter(name string) *Counter { @@ -65,9 +65,9 @@ func (c *Counter) marshalTo(prefix string, w io.Writer) { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned counter is safe to use from concurrent goroutines. // diff --git a/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go index d01dd851e..f89879099 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go +++ b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go @@ -11,9 +11,9 @@ import ( // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned counter is safe to use from concurrent goroutines. func NewFloatCounter(name string) *FloatCounter { @@ -70,9 +70,9 @@ func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned FloatCounter is safe to use from concurrent goroutines. 
// diff --git a/vendor/github.com/VictoriaMetrics/metrics/gauge.go b/vendor/github.com/VictoriaMetrics/metrics/gauge.go index 05bf1473f..9084fc4d7 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/gauge.go +++ b/vendor/github.com/VictoriaMetrics/metrics/gauge.go @@ -11,9 +11,9 @@ import ( // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // f must be safe for concurrent calls. // @@ -53,9 +53,9 @@ func (g *Gauge) marshalTo(prefix string, w io.Writer) { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned gauge is safe to use from concurrent goroutines. // diff --git a/vendor/github.com/VictoriaMetrics/metrics/histogram.go b/vendor/github.com/VictoriaMetrics/metrics/histogram.go index b0e8d575f..a57668177 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/histogram.go +++ b/vendor/github.com/VictoriaMetrics/metrics/histogram.go @@ -25,20 +25,20 @@ var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal) // Each bucket contains a counter for values in the given range. // Each non-empty bucket is exposed via the following metric: // -// <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter> +// <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter> // // Where: // -// - <metric_name> is the metric name passed to NewHistogram -// - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram -// - <start> and <end> - start and end values for the given bucket -// - <counter> - the number of hits to the given bucket during Update* calls +// - <metric_name> is the metric name passed to NewHistogram +// - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram +// - <start> and <end> - start and end values for the given bucket +// - <counter> - the number of hits to the given bucket during Update* calls // // Histogram buckets can be converted to Prometheus-like buckets with `le` labels // with `prometheus_buckets(<metric_name>_bucket)` function from PromQL extensions in VictoriaMetrics. // (see https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL ): // -// prometheus_buckets(request_duration_bucket) +// prometheus_buckets(request_duration_bucket) // // Time series produced by the Histogram have better compression ratio comparing to // Prometheus histogram buckets with `le` labels, since they don't include counters @@ -143,9 +143,9 @@ func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. func NewHistogram(name string) *Histogram { @@ -159,9 +159,9 @@ func NewHistogram(name string) *Histogram { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. 
// diff --git a/vendor/github.com/VictoriaMetrics/metrics/metrics.go b/vendor/github.com/VictoriaMetrics/metrics/metrics.go index c28c03613..82749f2d5 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/metrics.go +++ b/vendor/github.com/VictoriaMetrics/metrics/metrics.go @@ -5,9 +5,9 @@ // // Usage: // -// 1. Register the required metrics via New* functions. -// 2. Expose them to `/metrics` page via WritePrometheus. -// 3. Update the registered metrics during application lifetime. +// 1. Register the required metrics via New* functions. +// 2. Expose them to `/metrics` page via WritePrometheus. +// 3. Update the registered metrics during application lifetime. // // The package has been extracted from https://victoriametrics.com/ package metrics @@ -34,10 +34,9 @@ var defaultSet = NewSet() // // The WritePrometheus func is usually called inside "/metrics" handler: // -// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { -// metrics.WritePrometheus(w, true) -// }) -// +// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { +// metrics.WritePrometheus(w, true) +// }) func WritePrometheus(w io.Writer, exposeProcessMetrics bool) { defaultSet.WritePrometheus(w) if exposeProcessMetrics { @@ -50,50 +49,81 @@ func WritePrometheus(w io.Writer, exposeProcessMetrics bool) { // The following `go_*` and `process_*` metrics are exposed for the currently // running process. Below is a short description for the exposed `process_*` metrics: // -// - process_cpu_seconds_system_total - CPU time spent in syscalls -// - process_cpu_seconds_user_total - CPU time spent in userspace -// - process_cpu_seconds_total - CPU time spent by the process -// - process_major_pagefaults_total - page faults resulted in disk IO -// - process_minor_pagefaults_total - page faults resolved without disk IO -// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory) -// - process_resident_memory_peak_bytes - the maximum RSS memory usage -// - process_resident_memory_anon_bytes - RSS for memory-mapped files -// - process_resident_memory_file_bytes - RSS for memory allocated by the process -// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes -// - process_virtual_memory_bytes - virtual memory usage -// - process_virtual_memory_peak_bytes - the maximum virtual memory usage -// - process_num_threads - the number of threads -// - process_start_time_seconds - process start time as unix timestamp -// -// - process_io_read_bytes_total - the number of bytes read via syscalls -// - process_io_written_bytes_total - the number of bytes written via syscalls -// - process_io_read_syscalls_total - the number of read syscalls -// - process_io_write_syscalls_total - the number of write syscalls -// - process_io_storage_read_bytes_total - the number of bytes actually read from disk -// - process_io_storage_written_bytes_total - the number of bytes actually written to disk -// -// - go_memstats_alloc_bytes - memory usage for Go objects in the heap -// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects -// - go_memstats_frees_total - the cumulative counter for number of freed Go objects -// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector -// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata -// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes -// - go_memstats_heap_idle_bytes - idle memory ready for new Go 
object allocations -// - go_memstats_heap_objects - the number of Go objects in the heap -// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS -// - go_memstats_mallocs_total - the number of allocations for Go objects -// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start -// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks -// - go_memstats_stack_sys_bytes - memory requested fromthe OS for goroutine stacks -// - go_memstats_sys_bytes - memory requested by Go runtime from the OS +// - process_cpu_seconds_system_total - CPU time spent in syscalls +// +// - process_cpu_seconds_user_total - CPU time spent in userspace +// +// - process_cpu_seconds_total - CPU time spent by the process +// +// - process_major_pagefaults_total - page faults resulted in disk IO +// +// - process_minor_pagefaults_total - page faults resolved without disk IO +// +// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory) +// +// - process_resident_memory_peak_bytes - the maximum RSS memory usage +// +// - process_resident_memory_anon_bytes - RSS for memory-mapped files +// +// - process_resident_memory_file_bytes - RSS for memory allocated by the process +// +// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes +// +// - process_virtual_memory_bytes - virtual memory usage +// +// - process_virtual_memory_peak_bytes - the maximum virtual memory usage +// +// - process_num_threads - the number of threads +// +// - process_start_time_seconds - process start time as unix timestamp +// +// - process_io_read_bytes_total - the number of bytes read via syscalls +// +// - process_io_written_bytes_total - the number of bytes written via syscalls +// +// - process_io_read_syscalls_total - the number of read syscalls +// +// - process_io_write_syscalls_total - the number of write syscalls +// +// - process_io_storage_read_bytes_total - the number of bytes actually read from disk +// +// - process_io_storage_written_bytes_total - the number of bytes actually written to disk +// +// - go_memstats_alloc_bytes - memory usage for Go objects in the heap +// +// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects +// +// - go_memstats_frees_total - the cumulative counter for number of freed Go objects +// +// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector +// +// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata +// +// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes +// +// - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations +// +// - go_memstats_heap_objects - the number of Go objects in the heap +// +// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS +// +// - go_memstats_mallocs_total - the number of allocations for Go objects +// +// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start +// +// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks +// +// - go_memstats_stack_sys_bytes - memory requested fromthe OS for goroutine stacks +// +// - go_memstats_sys_bytes - memory requested by Go runtime from the OS // // The WriteProcessMetrics func is usually called in combination with writing Set metrics // inside "/metrics" handler: // -// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { -// mySet.WritePrometheus(w) 
-// metrics.WriteProcessMetrics(w) -// }) +// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { +// mySet.WritePrometheus(w) +// metrics.WriteProcessMetrics(w) +// }) // // See also WrteFDMetrics. func WriteProcessMetrics(w io.Writer) { diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go index 5e6ac935d..ca7167f80 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go +++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package metrics diff --git a/vendor/github.com/VictoriaMetrics/metrics/set.go b/vendor/github.com/VictoriaMetrics/metrics/set.go index ae55bb71c..c127ff973 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/set.go +++ b/vendor/github.com/VictoriaMetrics/metrics/set.go @@ -58,9 +58,9 @@ func (s *Set) WritePrometheus(w io.Writer) { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. func (s *Set) NewHistogram(name string) *Histogram { @@ -75,9 +75,9 @@ func (s *Set) NewHistogram(name string) *Histogram { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. // @@ -116,9 +116,9 @@ func (s *Set) GetOrCreateHistogram(name string) *Histogram { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned counter is safe to use from concurrent goroutines. func (s *Set) NewCounter(name string) *Counter { @@ -133,9 +133,9 @@ func (s *Set) NewCounter(name string) *Counter { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned counter is safe to use from concurrent goroutines. // @@ -174,9 +174,9 @@ func (s *Set) GetOrCreateCounter(name string) *Counter { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned FloatCounter is safe to use from concurrent goroutines. func (s *Set) NewFloatCounter(name string) *FloatCounter { @@ -191,9 +191,9 @@ func (s *Set) NewFloatCounter(name string) *FloatCounter { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned FloatCounter is safe to use from concurrent goroutines. // @@ -233,9 +233,9 @@ func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter { // name must be valid Prometheus-compatible metric with possible labels. 
// For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // f must be safe for concurrent calls. // @@ -257,9 +257,9 @@ func (s *Set) NewGauge(name string, f func() float64) *Gauge { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned gauge is safe to use from concurrent goroutines. // @@ -303,9 +303,9 @@ func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. func (s *Set) NewSummary(name string) *Summary { @@ -318,9 +318,9 @@ func (s *Set) NewSummary(name string) *Summary { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { @@ -347,9 +347,9 @@ func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. // @@ -365,9 +365,9 @@ func (s *Set) GetOrCreateSummary(name string) *Summary { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. // diff --git a/vendor/github.com/VictoriaMetrics/metrics/summary.go b/vendor/github.com/VictoriaMetrics/metrics/summary.go index 0f01e9ae1..52183d22b 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/summary.go +++ b/vendor/github.com/VictoriaMetrics/metrics/summary.go @@ -36,9 +36,9 @@ type Summary struct { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. func NewSummary(name string) *Summary { @@ -51,9 +51,9 @@ func NewSummary(name string) *Summary { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { @@ -140,9 +140,9 @@ func (sm *Summary) updateQuantiles() { // name must be valid Prometheus-compatible metric with possible labels. 
// For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. // @@ -158,9 +158,9 @@ func GetOrCreateSummary(name string) *Summary { // name must be valid Prometheus-compatible metric with possible labels. // For instance, // -// * foo -// * foo{bar="baz"} -// * foo{bar="baz",aaa="b"} +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} // // The returned summary is safe to use from concurrent goroutines. // diff --git a/vendor/github.com/bytedance/sonic/api.go b/vendor/github.com/bytedance/sonic/api.go index a042476f1..5aec591db 100644 --- a/vendor/github.com/bytedance/sonic/api.go +++ b/vendor/github.com/bytedance/sonic/api.go @@ -17,170 +17,168 @@ package sonic import ( - `io` + "io" - `github.com/bytedance/sonic/ast` + "github.com/bytedance/sonic/ast" ) // Config is a combination of sonic/encoder.Options and sonic/decoder.Options type Config struct { - // EscapeHTML indicates encoder to escape all HTML characters - // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). - // WARNING: This hurts performance A LOT, USE WITH CARE. - EscapeHTML bool - - // SortMapKeys indicates encoder that the keys of a map needs to be sorted - // before serializing into JSON. - // WARNING: This hurts performance A LOT, USE WITH CARE. - SortMapKeys bool - - // CompactMarshaler indicates encoder that the output JSON from json.Marshaler - // is always compact and needs no validation - CompactMarshaler bool - - // NoQuoteTextMarshaler indicates encoder that the output text from encoding.TextMarshaler - // is always escaped string and needs no quoting - NoQuoteTextMarshaler bool - - // NoNullSliceOrMap indicates encoder that all empty Array or Object are encoded as '[]' or '{}', - // instead of 'null' - NoNullSliceOrMap bool - - // UseInt64 indicates decoder to unmarshal an integer into an interface{} as an - // int64 instead of as a float64. - UseInt64 bool - - // UseNumber indicates decoder to unmarshal a number into an interface{} as a - // json.Number instead of as a float64. - UseNumber bool - - // UseUnicodeErrors indicates decoder to return an error when encounter invalid - // UTF-8 escape sequences. - UseUnicodeErrors bool - - // DisallowUnknownFields indicates decoder to return an error when the destination - // is a struct and the input contains object keys which do not match any - // non-ignored, exported fields in the destination. - DisallowUnknownFields bool - - // CopyString indicates decoder to decode string values by copying instead of referring. - CopyString bool - - // ValidateString indicates decoder and encoder to valid string values: decoder will return errors - // when unescaped control chars(\u0000-\u001f) in the string value of JSON. - ValidateString bool + // EscapeHTML indicates encoder to escape all HTML characters + // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). + // WARNING: This hurts performance A LOT, USE WITH CARE. + EscapeHTML bool + + // SortMapKeys indicates encoder that the keys of a map needs to be sorted + // before serializing into JSON. + // WARNING: This hurts performance A LOT, USE WITH CARE. 
+ SortMapKeys bool + + // CompactMarshaler indicates encoder that the output JSON from json.Marshaler + // is always compact and needs no validation + CompactMarshaler bool + + // NoQuoteTextMarshaler indicates encoder that the output text from encoding.TextMarshaler + // is always escaped string and needs no quoting + NoQuoteTextMarshaler bool + + // NoNullSliceOrMap indicates encoder that all empty Array or Object are encoded as '[]' or '{}', + // instead of 'null' + NoNullSliceOrMap bool + + // UseInt64 indicates decoder to unmarshal an integer into an interface{} as an + // int64 instead of as a float64. + UseInt64 bool + + // UseNumber indicates decoder to unmarshal a number into an interface{} as a + // json.Number instead of as a float64. + UseNumber bool + + // UseUnicodeErrors indicates decoder to return an error when encounter invalid + // UTF-8 escape sequences. + UseUnicodeErrors bool + + // DisallowUnknownFields indicates decoder to return an error when the destination + // is a struct and the input contains object keys which do not match any + // non-ignored, exported fields in the destination. + DisallowUnknownFields bool + + // CopyString indicates decoder to decode string values by copying instead of referring. + CopyString bool + + // ValidateString indicates decoder and encoder to valid string values: decoder will return errors + // when unescaped control chars(\u0000-\u001f) in the string value of JSON. + ValidateString bool } - + var ( - // ConfigDefault is the default config of APIs, aiming at efficiency and safty. - ConfigDefault = Config{}.Froze() - - // ConfigStd is the standard config of APIs, aiming at being compatible with encoding/json. - ConfigStd = Config{ - EscapeHTML : true, - SortMapKeys: true, - CompactMarshaler: true, - CopyString : true, - ValidateString : true, - }.Froze() - - // ConfigFastest is the fastest config of APIs, aiming at speed. - ConfigFastest = Config{ - NoQuoteTextMarshaler: true, - }.Froze() + // ConfigDefault is the default config of APIs, aiming at efficiency and safty. + ConfigDefault = Config{}.Froze() + + // ConfigStd is the standard config of APIs, aiming at being compatible with encoding/json. + ConfigStd = Config{ + EscapeHTML: true, + SortMapKeys: true, + CompactMarshaler: true, + CopyString: true, + ValidateString: true, + }.Froze() + + // ConfigFastest is the fastest config of APIs, aiming at speed. + ConfigFastest = Config{ + NoQuoteTextMarshaler: true, + }.Froze() ) - - + // API is a binding of specific config. // This interface is inspired by github.com/json-iterator/go, // and has same behaviors under equavilent config. type API interface { - // MarshalToString returns the JSON encoding string of v - MarshalToString(v interface{}) (string, error) - // Marshal returns the JSON encoding bytes of v. - Marshal(v interface{}) ([]byte, error) - // MarshalIndent returns the JSON encoding bytes with indent and prefix. - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - // UnmarshalFromString parses the JSON-encoded bytes and stores the result in the value pointed to by v. - UnmarshalFromString(str string, v interface{}) error - // Unmarshal parses the JSON-encoded string and stores the result in the value pointed to by v. 
- Unmarshal(data []byte, v interface{}) error - // NewEncoder create a Encoder holding writer - NewEncoder(writer io.Writer) Encoder - // NewDecoder create a Decoder holding reader - NewDecoder(reader io.Reader) Decoder - // Valid validates the JSON-encoded bytes and reportes if it is valid - Valid(data []byte) bool + // MarshalToString returns the JSON encoding string of v + MarshalToString(v interface{}) (string, error) + // Marshal returns the JSON encoding bytes of v. + Marshal(v interface{}) ([]byte, error) + // MarshalIndent returns the JSON encoding bytes with indent and prefix. + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + // UnmarshalFromString parses the JSON-encoded bytes and stores the result in the value pointed to by v. + UnmarshalFromString(str string, v interface{}) error + // Unmarshal parses the JSON-encoded string and stores the result in the value pointed to by v. + Unmarshal(data []byte, v interface{}) error + // NewEncoder create a Encoder holding writer + NewEncoder(writer io.Writer) Encoder + // NewDecoder create a Decoder holding reader + NewDecoder(reader io.Reader) Decoder + // Valid validates the JSON-encoded bytes and reportes if it is valid + Valid(data []byte) bool } // Encoder encodes JSON into io.Writer type Encoder interface { - // Encode writes the JSON encoding of v to the stream, followed by a newline character. - Encode(val interface{}) error - // SetEscapeHTML specifies whether problematic HTML characters - // should be escaped inside JSON quoted strings. - // The default behavior NOT ESCAPE - SetEscapeHTML(on bool) - // SetIndent instructs the encoder to format each subsequent encoded value - // as if indented by the package-level function Indent(dst, src, prefix, indent). - // Calling SetIndent("", "") disables indentation - SetIndent(prefix, indent string) + // Encode writes the JSON encoding of v to the stream, followed by a newline character. + Encode(val interface{}) error + // SetEscapeHTML specifies whether problematic HTML characters + // should be escaped inside JSON quoted strings. + // The default behavior NOT ESCAPE + SetEscapeHTML(on bool) + // SetIndent instructs the encoder to format each subsequent encoded value + // as if indented by the package-level function Indent(dst, src, prefix, indent). + // Calling SetIndent("", "") disables indentation + SetIndent(prefix, indent string) } // Decoder decodes JSON from io.Read type Decoder interface { - // Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. - Decode(val interface{}) error - // Buffered returns a reader of the data remaining in the Decoder's buffer. - // The reader is valid until the next call to Decode. - Buffered() io.Reader - // DisallowUnknownFields causes the Decoder to return an error when the destination is a struct - // and the input contains object keys which do not match any non-ignored, exported fields in the destination. - DisallowUnknownFields() - // More reports whether there is another element in the current array or object being parsed. - More() bool - // UseNumber causes the Decoder to unmarshal a number into an interface{} as a Number instead of as a float64. - UseNumber() + // Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. + Decode(val interface{}) error + // Buffered returns a reader of the data remaining in the Decoder's buffer. + // The reader is valid until the next call to Decode. 
+ Buffered() io.Reader + // DisallowUnknownFields causes the Decoder to return an error when the destination is a struct + // and the input contains object keys which do not match any non-ignored, exported fields in the destination. + DisallowUnknownFields() + // More reports whether there is another element in the current array or object being parsed. + More() bool + // UseNumber causes the Decoder to unmarshal a number into an interface{} as a Number instead of as a float64. + UseNumber() } // Marshal returns the JSON encoding bytes of v. func Marshal(val interface{}) ([]byte, error) { - return ConfigDefault.Marshal(val) + return ConfigDefault.Marshal(val) } // MarshalString returns the JSON encoding string of v. func MarshalString(val interface{}) (string, error) { - return ConfigDefault.MarshalToString(val) + return ConfigDefault.MarshalToString(val) } // Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. // NOTICE: This API copies given buffer by default, // if you want to pass JSON more efficiently, use UnmarshalString instead. func Unmarshal(buf []byte, val interface{}) error { - return ConfigDefault.Unmarshal(buf, val) + return ConfigDefault.Unmarshal(buf, val) } // UnmarshalString is like Unmarshal, except buf is a string. func UnmarshalString(buf string, val interface{}) error { - return ConfigDefault.UnmarshalFromString(buf, val) + return ConfigDefault.UnmarshalFromString(buf, val) } // Get searches the given path from json, // and returns its representing ast.Node. // // Each path arg must be integer or string: -// - Integer is target index(>=0), means searching current node as array. -// - String is target key, means searching current node as object. +// - Integer is target index(>=0), means searching current node as array. +// - String is target key, means searching current node as object. // -// // Note, the api expects the json is well-formed at least, // otherwise it may return unexpected result. func Get(src []byte, path ...interface{}) (ast.Node, error) { - return GetFromString(string(src), path...) + return GetFromString(string(src), path...) } // GetFromString is same with Get except src is string, // which can reduce unnecessary memory copy. func GetFromString(src string, path ...interface{}) (ast.Node, error) { - return ast.NewSearcher(src).GetByPath(path...) -} \ No newline at end of file + return ast.NewSearcher(src).GetByPath(path...) 
+} diff --git a/vendor/github.com/bytedance/sonic/ast/api_amd64.go b/vendor/github.com/bytedance/sonic/ast/api_amd64.go index 3047f59c3..ace80c0c2 100644 --- a/vendor/github.com/bytedance/sonic/ast/api_amd64.go +++ b/vendor/github.com/bytedance/sonic/ast/api_amd64.go @@ -1,3 +1,4 @@ +//go:build amd64 && go1.15 && !go1.21 // +build amd64,go1.15,!go1.21 /* @@ -19,133 +20,133 @@ package ast import ( - `runtime` - `unsafe` - - `github.com/bytedance/sonic/encoder` - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` - uq `github.com/bytedance/sonic/unquote` - `github.com/chenzhuoyu/base64x` + "runtime" + "unsafe" + + "github.com/bytedance/sonic/encoder" + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" + uq "github.com/bytedance/sonic/unquote" + "github.com/chenzhuoyu/base64x" ) var typeByte = rt.UnpackEface(byte(0)).Type //go:nocheckptr func quote(buf *[]byte, val string) { - *buf = append(*buf, '"') - if len(val) == 0 { - *buf = append(*buf, '"') - return - } - - sp := rt.IndexChar(val, 0) - nb := len(val) - b := (*rt.GoSlice)(unsafe.Pointer(buf)) - - // input buffer - for nb > 0 { - // output buffer - dp := unsafe.Pointer(uintptr(b.Ptr) + uintptr(b.Len)) - dn := b.Cap - b.Len - // call native.Quote, dn is byte count it outputs - ret := native.Quote(sp, nb, dp, &dn, 0) - // update *buf length - b.Len += dn - - // no need more output - if ret >= 0 { - break - } - - // double buf size - *b = growslice(typeByte, *b, b.Cap*2) - // ret is the complement of consumed input - ret = ^ret - // update input buffer - nb -= ret - sp = unsafe.Pointer(uintptr(sp) + uintptr(ret)) - } - - runtime.KeepAlive(buf) - runtime.KeepAlive(sp) - *buf = append(*buf, '"') + *buf = append(*buf, '"') + if len(val) == 0 { + *buf = append(*buf, '"') + return + } + + sp := rt.IndexChar(val, 0) + nb := len(val) + b := (*rt.GoSlice)(unsafe.Pointer(buf)) + + // input buffer + for nb > 0 { + // output buffer + dp := unsafe.Pointer(uintptr(b.Ptr) + uintptr(b.Len)) + dn := b.Cap - b.Len + // call native.Quote, dn is byte count it outputs + ret := native.Quote(sp, nb, dp, &dn, 0) + // update *buf length + b.Len += dn + + // no need more output + if ret >= 0 { + break + } + + // double buf size + *b = growslice(typeByte, *b, b.Cap*2) + // ret is the complement of consumed input + ret = ^ret + // update input buffer + nb -= ret + sp = unsafe.Pointer(uintptr(sp) + uintptr(ret)) + } + + runtime.KeepAlive(buf) + runtime.KeepAlive(sp) + *buf = append(*buf, '"') } func unquote(src string) (string, types.ParsingError) { - return uq.String(src) + return uq.String(src) } func decodeBase64(src string) ([]byte, error) { - return base64x.StdEncoding.DecodeString(src) + return base64x.StdEncoding.DecodeString(src) } func encodeBase64(src []byte) string { - return base64x.StdEncoding.EncodeToString(src) + return base64x.StdEncoding.EncodeToString(src) } func (self *Parser) decodeValue() (val types.JsonState) { - sv := (*rt.GoString)(unsafe.Pointer(&self.s)) - self.p = native.Value(sv.Ptr, sv.Len, self.p, &val, 0) - return + sv := (*rt.GoString)(unsafe.Pointer(&self.s)) + self.p = native.Value(sv.Ptr, sv.Len, self.p, &val, 0) + return } func (self *Parser) skip() (int, types.ParsingError) { - fsm := types.NewStateMachine() - start := native.SkipOne(&self.s, &self.p, fsm, 0) - types.FreeStateMachine(fsm) - - if start < 0 { - return self.p, types.ParsingError(-start) - 
} - return start, 0 + fsm := types.NewStateMachine() + start := native.SkipOne(&self.s, &self.p, fsm, 0) + types.FreeStateMachine(fsm) + + if start < 0 { + return self.p, types.ParsingError(-start) + } + return start, 0 } func (self *Node) encodeInterface(buf *[]byte) error { - //WARN: NOT compatible with json.Encoder - return encoder.EncodeInto(buf, self.packAny(), 0) + //WARN: NOT compatible with json.Encoder + return encoder.EncodeInto(buf, self.packAny(), 0) } func (self *Parser) skipFast() (int, types.ParsingError) { - start := native.SkipOneFast(&self.s, &self.p) - if start < 0 { - return self.p, types.ParsingError(-start) - } - return start, 0 + start := native.SkipOneFast(&self.s, &self.p) + if start < 0 { + return self.p, types.ParsingError(-start) + } + return start, 0 } func (self *Parser) getByPath(path ...interface{}) (int, types.ParsingError) { - fsm := types.NewStateMachine() - start := native.GetByPath(&self.s, &self.p, &path, fsm) - types.FreeStateMachine(fsm) - runtime.KeepAlive(path) - if start < 0 { - return self.p, types.ParsingError(-start) - } - return start, 0 + fsm := types.NewStateMachine() + start := native.GetByPath(&self.s, &self.p, &path, fsm) + types.FreeStateMachine(fsm) + runtime.KeepAlive(path) + if start < 0 { + return self.p, types.ParsingError(-start) + } + return start, 0 } func (self *Searcher) GetByPath(path ...interface{}) (Node, error) { - var err types.ParsingError - var start int - - self.parser.p = 0 - start, err = self.parser.getByPath(path...) - if err != 0 { - // for compatibility with old version - if err == types.ERR_NOT_FOUND { - return Node{}, ErrNotExist - } - if err == types.ERR_UNSUPPORT_TYPE { - panic("path must be either int(>=0) or string") - } - return Node{}, self.parser.syntaxError(err) - } - - t := switchRawType(self.parser.s[start]) - if t == _V_NONE { - return Node{}, self.parser.ExportError(err) - } - return newRawNode(self.parser.s[start:self.parser.p], t), nil -} \ No newline at end of file + var err types.ParsingError + var start int + + self.parser.p = 0 + start, err = self.parser.getByPath(path...) 
+ if err != 0 { + // for compatibility with old version + if err == types.ERR_NOT_FOUND { + return Node{}, ErrNotExist + } + if err == types.ERR_UNSUPPORT_TYPE { + panic("path must be either int(>=0) or string") + } + return Node{}, self.parser.syntaxError(err) + } + + t := switchRawType(self.parser.s[start]) + if t == _V_NONE { + return Node{}, self.parser.ExportError(err) + } + return newRawNode(self.parser.s[start:self.parser.p], t), nil +} diff --git a/vendor/github.com/bytedance/sonic/ast/api_compat.go b/vendor/github.com/bytedance/sonic/ast/api_compat.go index b18b5ae8c..831d7eff2 100644 --- a/vendor/github.com/bytedance/sonic/ast/api_compat.go +++ b/vendor/github.com/bytedance/sonic/ast/api_compat.go @@ -1,3 +1,4 @@ +//go:build !amd64 || go1.21 // +build !amd64 go1.21 /* @@ -19,102 +20,102 @@ package ast import ( - `encoding/base64` - `encoding/json` - `fmt` + "encoding/base64" + "encoding/json" + "fmt" - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) func quote(buf *[]byte, val string) { - quoteString(buf, val) + quoteString(buf, val) } func unquote(src string) (string, types.ParsingError) { - sp := rt.IndexChar(src, -1) - out, ok := unquoteBytes(rt.BytesFrom(sp, len(src)+2, len(src)+2)) - if !ok { - return "", types.ERR_INVALID_ESCAPE - } - return rt.Mem2Str(out), 0 + sp := rt.IndexChar(src, -1) + out, ok := unquoteBytes(rt.BytesFrom(sp, len(src)+2, len(src)+2)) + if !ok { + return "", types.ERR_INVALID_ESCAPE + } + return rt.Mem2Str(out), 0 } func decodeBase64(src string) ([]byte, error) { - return base64.StdEncoding.DecodeString(src) + return base64.StdEncoding.DecodeString(src) } func encodeBase64(src []byte) string { - return base64.StdEncoding.EncodeToString(src) + return base64.StdEncoding.EncodeToString(src) } func (self *Parser) decodeValue() (val types.JsonState) { - e, v := decodeValue(self.s, self.p) - if e < 0 { - return v - } - self.p = e - return v + e, v := decodeValue(self.s, self.p) + if e < 0 { + return v + } + self.p = e + return v } func (self *Parser) skip() (int, types.ParsingError) { - e, s := skipValue(self.s, self.p) - if e < 0 { - return self.p, types.ParsingError(-e) - } - self.p = e - return s, 0 + e, s := skipValue(self.s, self.p) + if e < 0 { + return self.p, types.ParsingError(-e) + } + self.p = e + return s, 0 } func (self *Parser) skipFast() (int, types.ParsingError) { - e, s := skipValueFast(self.s, self.p) - if e < 0 { - return self.p, types.ParsingError(-e) - } - self.p = e - return s, 0 + e, s := skipValueFast(self.s, self.p) + if e < 0 { + return self.p, types.ParsingError(-e) + } + self.p = e + return s, 0 } func (self *Node) encodeInterface(buf *[]byte) error { - out, err := json.Marshal(self.packAny()) - if err != nil { - return err - } - *buf = append(*buf, out...) - return nil + out, err := json.Marshal(self.packAny()) + if err != nil { + return err + } + *buf = append(*buf, out...) 
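The only substantive change at the top of these files is the constraint style: gofmt under Go 1.17+ inserts the newer //go:build directive above the legacy // +build line and keeps the two in sync. The pair of files partitions the build matrix: the amd64 fast path covers Go 1.15 through 1.20, and everything else (non-amd64, or Go 1.21+) compiles the portable fallback. A minimal sketch of the same pairing, with illustrative file names:

// fast_amd64.go, the SIMD-backed implementation:
//
//go:build amd64 && go1.15 && !go1.21
// +build amd64,go1.15,!go1.21

// compat.go, the pure-Go fallback (the exact complement of the constraint above):
//
//go:build !amd64 || go1.21
// +build !amd64 go1.21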
+ return nil } func (self *Searcher) GetByPath(path ...interface{}) (Node, error) { - self.parser.p = 0 - - var err types.ParsingError - for _, p := range path { - if idx, ok := p.(int); ok && idx >= 0 { - if err = self.parser.searchIndex(idx); err != 0 { - return Node{}, self.parser.ExportError(err) - } - } else if key, ok := p.(string); ok { - if err = self.parser.searchKey(key); err != 0 { - return Node{}, self.parser.ExportError(err) - } - } else { - panic("path must be either int(>=0) or string") - } - } - - var start = self.parser.p - if start, err = self.parser.skip(); err != 0 { - return Node{}, self.parser.ExportError(err) - } - ns := len(self.parser.s) - if self.parser.p > ns || start >= ns || start>=self.parser.p { - return Node{}, fmt.Errorf("skip %d char out of json boundary", start) - } - - t := switchRawType(self.parser.s[start]) - if t == _V_NONE { - return Node{}, self.parser.ExportError(err) - } - - return newRawNode(self.parser.s[start:self.parser.p], t), nil -} \ No newline at end of file + self.parser.p = 0 + + var err types.ParsingError + for _, p := range path { + if idx, ok := p.(int); ok && idx >= 0 { + if err = self.parser.searchIndex(idx); err != 0 { + return Node{}, self.parser.ExportError(err) + } + } else if key, ok := p.(string); ok { + if err = self.parser.searchKey(key); err != 0 { + return Node{}, self.parser.ExportError(err) + } + } else { + panic("path must be either int(>=0) or string") + } + } + + var start = self.parser.p + if start, err = self.parser.skip(); err != 0 { + return Node{}, self.parser.ExportError(err) + } + ns := len(self.parser.s) + if self.parser.p > ns || start >= ns || start >= self.parser.p { + return Node{}, fmt.Errorf("skip %d char out of json boundary", start) + } + + t := switchRawType(self.parser.s[start]) + if t == _V_NONE { + return Node{}, self.parser.ExportError(err) + } + + return newRawNode(self.parser.s[start:self.parser.p], t), nil +} diff --git a/vendor/github.com/bytedance/sonic/ast/decode.go b/vendor/github.com/bytedance/sonic/ast/decode.go index 6a5f6fea3..790a915fc 100644 --- a/vendor/github.com/bytedance/sonic/ast/decode.go +++ b/vendor/github.com/bytedance/sonic/ast/decode.go @@ -17,559 +17,559 @@ package ast import ( - `encoding/base64` - `runtime` - `strconv` - `unsafe` + "encoding/base64" + "runtime" + "strconv" + "unsafe" - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) const _blankCharsMask = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') const ( - bytesNull = "null" - bytesTrue = "true" - bytesFalse = "false" - bytesObject = "{}" - bytesArray = "[]" + bytesNull = "null" + bytesTrue = "true" + bytesFalse = "false" + bytesObject = "{}" + bytesArray = "[]" ) func isSpace(c byte) bool { - return (int(1<<c) & _blankCharsMask) != 0 + return (int(1<<c) & _blankCharsMask) != 0 } //go:nocheckptr func skipBlank(src string, pos int) int { - se := uintptr(rt.IndexChar(src, len(src))) - sp := uintptr(rt.IndexChar(src, pos)) - - for sp < se { - if !isSpace(*(*byte)(unsafe.Pointer(sp))) { - break - } - sp += 1 - } - if sp >= se { - return -int(types.ERR_EOF) - } - runtime.KeepAlive(src) - return int(sp - uintptr(rt.IndexChar(src, 0))) + se := uintptr(rt.IndexChar(src, len(src))) + sp := uintptr(rt.IndexChar(src, pos)) + + for sp < se { + if !isSpace(*(*byte)(unsafe.Pointer(sp))) { + break + } + sp += 1 + } + if sp >= se { + return -int(types.ERR_EOF) + } + 
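Both implementations of Searcher.GetByPath above accept the same path vocabulary: non-negative ints index arrays, strings key objects, and anything else panics. A usage sketch against the public API (illustrative input; Raw is the node accessor defined later in this diff):

package main

import (
	"fmt"

	"github.com/bytedance/sonic/ast"
)

func main() {
	src := `{"users":[{"name":"ada"},{"name":"bob"}]}`
	// ints index arrays, strings key objects; other path types panic
	node, err := ast.NewSearcher(src).GetByPath("users", 1, "name")
	if err != nil {
		panic(err)
	}
	raw, _ := node.Raw() // raw JSON text of the located value
	fmt.Println(raw)     // "bob"
}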
runtime.KeepAlive(src) + return int(sp - uintptr(rt.IndexChar(src, 0))) } func decodeNull(src string, pos int) (ret int) { - ret = pos + 4 - if ret > len(src) { - return -int(types.ERR_EOF) - } - if src[pos:ret] == bytesNull { - return ret - } else { - return -int(types.ERR_INVALID_CHAR) - } + ret = pos + 4 + if ret > len(src) { + return -int(types.ERR_EOF) + } + if src[pos:ret] == bytesNull { + return ret + } else { + return -int(types.ERR_INVALID_CHAR) + } } func decodeTrue(src string, pos int) (ret int) { - ret = pos + 4 - if ret > len(src) { - return -int(types.ERR_EOF) - } - if src[pos:ret] == bytesTrue { - return ret - } else { - return -int(types.ERR_INVALID_CHAR) - } + ret = pos + 4 + if ret > len(src) { + return -int(types.ERR_EOF) + } + if src[pos:ret] == bytesTrue { + return ret + } else { + return -int(types.ERR_INVALID_CHAR) + } } func decodeFalse(src string, pos int) (ret int) { - ret = pos + 5 - if ret > len(src) { - return -int(types.ERR_EOF) - } - if src[pos:ret] == bytesFalse { - return ret - } - return -int(types.ERR_INVALID_CHAR) + ret = pos + 5 + if ret > len(src) { + return -int(types.ERR_EOF) + } + if src[pos:ret] == bytesFalse { + return ret + } + return -int(types.ERR_INVALID_CHAR) } //go:nocheckptr func decodeString(src string, pos int) (ret int, v string) { - ret, ep := skipString(src, pos) - if ep == -1 { - (*rt.GoString)(unsafe.Pointer(&v)).Ptr = rt.IndexChar(src, pos+1) - (*rt.GoString)(unsafe.Pointer(&v)).Len = ret - pos - 2 - return ret, v - } - - vv, ok := unquoteBytes(rt.Str2Mem(src[pos:ret])) - if !ok { - return -int(types.ERR_INVALID_CHAR), "" - } - - runtime.KeepAlive(src) - return ret, rt.Mem2Str(vv) + ret, ep := skipString(src, pos) + if ep == -1 { + (*rt.GoString)(unsafe.Pointer(&v)).Ptr = rt.IndexChar(src, pos+1) + (*rt.GoString)(unsafe.Pointer(&v)).Len = ret - pos - 2 + return ret, v + } + + vv, ok := unquoteBytes(rt.Str2Mem(src[pos:ret])) + if !ok { + return -int(types.ERR_INVALID_CHAR), "" + } + + runtime.KeepAlive(src) + return ret, rt.Mem2Str(vv) } func decodeBinary(src string, pos int) (ret int, v []byte) { - var vv string - ret, vv = decodeString(src, pos) - if ret < 0 { - return ret, nil - } - var err error - v, err = base64.StdEncoding.DecodeString(vv) - if err != nil { - return -int(types.ERR_INVALID_CHAR), nil - } - return ret, v + var vv string + ret, vv = decodeString(src, pos) + if ret < 0 { + return ret, nil + } + var err error + v, err = base64.StdEncoding.DecodeString(vv) + if err != nil { + return -int(types.ERR_INVALID_CHAR), nil + } + return ret, v } func isDigit(c byte) bool { - return c >= '0' && c <= '9' + return c >= '0' && c <= '9' } //go:nocheckptr func decodeInt64(src string, pos int) (ret int, v int64, err error) { - sp := uintptr(rt.IndexChar(src, pos)) - ss := uintptr(sp) - se := uintptr(rt.IndexChar(src, len(src))) - if uintptr(sp) >= se { - return -int(types.ERR_EOF), 0, nil - } - - if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { - sp += 1 - } - if sp == se { - return -int(types.ERR_EOF), 0, nil - } - - for ; sp < se; sp += uintptr(1) { - if !isDigit(*(*byte)(unsafe.Pointer(sp))) { - break - } - } - - if sp < se { - if c := *(*byte)(unsafe.Pointer(sp)); c == '.' 
|| c == 'e' || c == 'E' { - return -int(types.ERR_INVALID_NUMBER_FMT), 0, nil - } - } - - var vv string - ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) - (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) - (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos - - v, err = strconv.ParseInt(vv, 10, 64) - if err != nil { - //NOTICE: allow overflow here - if err.(*strconv.NumError).Err == strconv.ErrRange { - return ret, 0, err - } - return -int(types.ERR_INVALID_CHAR), 0, err - } - - runtime.KeepAlive(src) - return ret, v, nil + sp := uintptr(rt.IndexChar(src, pos)) + ss := uintptr(sp) + se := uintptr(rt.IndexChar(src, len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF), 0, nil + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + if sp == se { + return -int(types.ERR_EOF), 0, nil + } + + for ; sp < se; sp += uintptr(1) { + if !isDigit(*(*byte)(unsafe.Pointer(sp))) { + break + } + } + + if sp < se { + if c := *(*byte)(unsafe.Pointer(sp)); c == '.' || c == 'e' || c == 'E' { + return -int(types.ERR_INVALID_NUMBER_FMT), 0, nil + } + } + + var vv string + ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) + (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos + + v, err = strconv.ParseInt(vv, 10, 64) + if err != nil { + //NOTICE: allow overflow here + if err.(*strconv.NumError).Err == strconv.ErrRange { + return ret, 0, err + } + return -int(types.ERR_INVALID_CHAR), 0, err + } + + runtime.KeepAlive(src) + return ret, v, nil } func isNumberChars(c byte) bool { - return (c >= '0' && c <= '9') || c == '+' || c == '-' || c == 'e' || c == 'E' || c == '.' + return (c >= '0' && c <= '9') || c == '+' || c == '-' || c == 'e' || c == 'E' || c == '.' 
} //go:nocheckptr func decodeFloat64(src string, pos int) (ret int, v float64, err error) { - sp := uintptr(rt.IndexChar(src, pos)) - ss := uintptr(sp) - se := uintptr(rt.IndexChar(src, len(src))) - if uintptr(sp) >= se { - return -int(types.ERR_EOF), 0, nil - } - - if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { - sp += 1 - } - if sp == se { - return -int(types.ERR_EOF), 0, nil - } - - for ; sp < se; sp += uintptr(1) { - if !isNumberChars(*(*byte)(unsafe.Pointer(sp))) { - break - } - } - - var vv string - ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) - (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) - (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos - - v, err = strconv.ParseFloat(vv, 64) - if err != nil { - //NOTICE: allow overflow here - if err.(*strconv.NumError).Err == strconv.ErrRange { - return ret, 0, err - } - return -int(types.ERR_INVALID_CHAR), 0, err - } - - runtime.KeepAlive(src) - return ret, v, nil + sp := uintptr(rt.IndexChar(src, pos)) + ss := uintptr(sp) + se := uintptr(rt.IndexChar(src, len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF), 0, nil + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + if sp == se { + return -int(types.ERR_EOF), 0, nil + } + + for ; sp < se; sp += uintptr(1) { + if !isNumberChars(*(*byte)(unsafe.Pointer(sp))) { + break + } + } + + var vv string + ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) + (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos + + v, err = strconv.ParseFloat(vv, 64) + if err != nil { + //NOTICE: allow overflow here + if err.(*strconv.NumError).Err == strconv.ErrRange { + return ret, 0, err + } + return -int(types.ERR_INVALID_CHAR), 0, err + } + + runtime.KeepAlive(src) + return ret, v, nil } func decodeValue(src string, pos int) (ret int, v types.JsonState) { - pos = skipBlank(src, pos) - if pos < 0 { - return pos, types.JsonState{Vt: types.ValueType(pos)} - } - switch c := src[pos]; c { - case 'n': - ret = decodeNull(src, pos) - if ret < 0 { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - return ret, types.JsonState{Vt: types.V_NULL} - case '"': - var ep int - ret, ep = skipString(src, pos) - if ret < 0 { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - return ret, types.JsonState{Vt: types.V_STRING, Iv: int64(pos + 1), Ep: ep} - case '{': - return pos + 1, types.JsonState{Vt: types.V_OBJECT} - case '[': - return pos + 1, types.JsonState{Vt: types.V_ARRAY} - case 't': - ret = decodeTrue(src, pos) - if ret < 0 { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - return ret, types.JsonState{Vt: types.V_TRUE} - case 'f': - ret = decodeFalse(src, pos) - if ret < 0 { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - return ret, types.JsonState{Vt: types.V_FALSE} - case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - var iv int64 - ret, iv, _ = decodeInt64(src, pos) - if ret >= 0 { - return ret, types.JsonState{Vt: types.V_INTEGER, Iv: iv, Ep: pos} - } else if ret != -int(types.ERR_INVALID_NUMBER_FMT) { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - var fv float64 - ret, fv, _ = decodeFloat64(src, pos) - if ret >= 0 { - return ret, types.JsonState{Vt: types.V_DOUBLE, Dv: fv, Ep: pos} - } else { - return ret, types.JsonState{Vt: types.ValueType(ret)} - } - default: - return -int(types.ERR_INVALID_CHAR), types.JsonState{Vt:-types.ValueType(types.ERR_INVALID_CHAR)} - } + pos = 
skipBlank(src, pos) + if pos < 0 { + return pos, types.JsonState{Vt: types.ValueType(pos)} + } + switch c := src[pos]; c { + case 'n': + ret = decodeNull(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_NULL} + case '"': + var ep int + ret, ep = skipString(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_STRING, Iv: int64(pos + 1), Ep: ep} + case '{': + return pos + 1, types.JsonState{Vt: types.V_OBJECT} + case '[': + return pos + 1, types.JsonState{Vt: types.V_ARRAY} + case 't': + ret = decodeTrue(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_TRUE} + case 'f': + ret = decodeFalse(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_FALSE} + case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + var iv int64 + ret, iv, _ = decodeInt64(src, pos) + if ret >= 0 { + return ret, types.JsonState{Vt: types.V_INTEGER, Iv: iv, Ep: pos} + } else if ret != -int(types.ERR_INVALID_NUMBER_FMT) { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + var fv float64 + ret, fv, _ = decodeFloat64(src, pos) + if ret >= 0 { + return ret, types.JsonState{Vt: types.V_DOUBLE, Dv: fv, Ep: pos} + } else { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + default: + return -int(types.ERR_INVALID_CHAR), types.JsonState{Vt: -types.ValueType(types.ERR_INVALID_CHAR)} + } } //go:nocheckptr func skipNumber(src string, pos int) (ret int) { - sp := uintptr(rt.IndexChar(src, pos)) - se := uintptr(rt.IndexChar(src, len(src))) - if uintptr(sp) >= se { - return -int(types.ERR_EOF) - } - - if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { - sp += 1 - } - ss := sp - - var pointer bool - var exponent bool - var lastIsDigit bool - var nextNeedDigit = true - - for ; sp < se; sp += uintptr(1) { - c := *(*byte)(unsafe.Pointer(sp)) - if isDigit(c) { - lastIsDigit = true - nextNeedDigit = false - continue - } else if nextNeedDigit { - return -int(types.ERR_INVALID_CHAR) - } else if c == '.' 
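decodeValue above dispatches numbers through decodeInt64 first and retries with decodeFloat64 only when the integer parse stops at a '.', 'e', or 'E' (ERR_INVALID_NUMBER_FMT); that ordering keeps exact 64-bit integers instead of routing everything through float64. A standalone strconv-based sketch of the same fallback (parseNumber is an illustrative name; the real code parses in place with unsafe pointers):

package sketch

import "strconv"

// parseNumber mirrors decodeValue's int-then-float ordering: exact 64-bit
// integers survive, and only values with '.', 'e', or 'E' fall back to float.
func parseNumber(s string) (interface{}, error) {
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i, nil // analogous to V_INTEGER
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return nil, err
	}
	return f, nil // analogous to V_DOUBLE
}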
{ - if !lastIsDigit || pointer || exponent || sp == ss { - return -int(types.ERR_INVALID_CHAR) - } - pointer = true - lastIsDigit = false - nextNeedDigit = true - continue - } else if c == 'e' || c == 'E' { - if !lastIsDigit || exponent { - return -int(types.ERR_INVALID_CHAR) - } - if sp == se-1 { - return -int(types.ERR_EOF) - } - exponent = true - lastIsDigit = false - nextNeedDigit = false - continue - } else if c == '-' || c == '+' { - if prev := *(*byte)(unsafe.Pointer(sp - 1)); prev != 'e' && prev != 'E' { - return -int(types.ERR_INVALID_CHAR) - } - lastIsDigit = false - nextNeedDigit = true - continue - } else { - break - } - } - - if nextNeedDigit { - return -int(types.ERR_EOF) - } - - runtime.KeepAlive(src) - return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF) + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + ss := sp + + var pointer bool + var exponent bool + var lastIsDigit bool + var nextNeedDigit = true + + for ; sp < se; sp += uintptr(1) { + c := *(*byte)(unsafe.Pointer(sp)) + if isDigit(c) { + lastIsDigit = true + nextNeedDigit = false + continue + } else if nextNeedDigit { + return -int(types.ERR_INVALID_CHAR) + } else if c == '.' { + if !lastIsDigit || pointer || exponent || sp == ss { + return -int(types.ERR_INVALID_CHAR) + } + pointer = true + lastIsDigit = false + nextNeedDigit = true + continue + } else if c == 'e' || c == 'E' { + if !lastIsDigit || exponent { + return -int(types.ERR_INVALID_CHAR) + } + if sp == se-1 { + return -int(types.ERR_EOF) + } + exponent = true + lastIsDigit = false + nextNeedDigit = false + continue + } else if c == '-' || c == '+' { + if prev := *(*byte)(unsafe.Pointer(sp - 1)); prev != 'e' && prev != 'E' { + return -int(types.ERR_INVALID_CHAR) + } + lastIsDigit = false + nextNeedDigit = true + continue + } else { + break + } + } + + if nextNeedDigit { + return -int(types.ERR_EOF) + } + + runtime.KeepAlive(src) + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) } //go:nocheckptr func skipString(src string, pos int) (ret int, ep int) { - if pos+1 >= len(src) { - return -int(types.ERR_EOF), -1 - } - - sp := uintptr(rt.IndexChar(src, pos)) - se := uintptr(rt.IndexChar(src, len(src))) - - // not start with quote - if *(*byte)(unsafe.Pointer(sp)) != '"' { - return -int(types.ERR_INVALID_CHAR), -1 - } - sp += 1 - - ep = -1 - for sp < se { - c := *(*byte)(unsafe.Pointer(sp)) - if c == '\\' { - if ep == -1 { - ep = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) - } - sp += 2 - continue - } - sp += 1 - if c == '"' { - return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep - } - } - - runtime.KeepAlive(src) - // not found the closed quote until EOF - return -int(types.ERR_EOF), -1 + if pos+1 >= len(src) { + return -int(types.ERR_EOF), -1 + } + + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + + // not start with quote + if *(*byte)(unsafe.Pointer(sp)) != '"' { + return -int(types.ERR_INVALID_CHAR), -1 + } + sp += 1 + + ep = -1 + for sp < se { + c := *(*byte)(unsafe.Pointer(sp)) + if c == '\\' { + if ep == -1 { + ep = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + } + sp += 2 + continue + } + sp += 1 + if c == '"' { + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep + } + } + + runtime.KeepAlive(src) + // 
not found the closed quote until EOF + return -int(types.ERR_EOF), -1 } //go:nocheckptr func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) { - if pos+1 >= len(src) { - return -int(types.ERR_EOF) - } - - sp := uintptr(rt.IndexChar(src, pos)) - se := uintptr(rt.IndexChar(src, len(src))) - - if *(*byte)(unsafe.Pointer(sp)) != lchar { - return -int(types.ERR_INVALID_CHAR) - } - - sp += 1 - nbrace := 1 - inquote := false - - for sp < se { - c := *(*byte)(unsafe.Pointer(sp)) - if c == '\\' { - sp += 2 - continue - } else if c == '"' { - inquote = !inquote - } else if c == lchar { - if !inquote { - nbrace += 1 - } - } else if c == rchar { - if !inquote { - nbrace -= 1 - if nbrace == 0 { - sp += 1 - break - } - } - } - sp += 1 - } - - if nbrace != 0 { - return -int(types.ERR_INVALID_CHAR) - } - - runtime.KeepAlive(src) - return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + if pos+1 >= len(src) { + return -int(types.ERR_EOF) + } + + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + + if *(*byte)(unsafe.Pointer(sp)) != lchar { + return -int(types.ERR_INVALID_CHAR) + } + + sp += 1 + nbrace := 1 + inquote := false + + for sp < se { + c := *(*byte)(unsafe.Pointer(sp)) + if c == '\\' { + sp += 2 + continue + } else if c == '"' { + inquote = !inquote + } else if c == lchar { + if !inquote { + nbrace += 1 + } + } else if c == rchar { + if !inquote { + nbrace -= 1 + if nbrace == 0 { + sp += 1 + break + } + } + } + sp += 1 + } + + if nbrace != 0 { + return -int(types.ERR_INVALID_CHAR) + } + + runtime.KeepAlive(src) + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) } func skipValueFast(src string, pos int) (ret int, start int) { - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - switch c := src[pos]; c { - case 'n': - ret = decodeNull(src, pos) - case '"': - ret, _ = skipString(src, pos) - case '{': - ret = skipPair(src, pos, '{', '}') - case '[': - ret = skipPair(src, pos, '[', ']') - case 't': - ret = decodeTrue(src, pos) - case 'f': - ret = decodeFalse(src, pos) - case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - ret = skipNumber(src, pos) - default: - ret = -int(types.ERR_INVALID_CHAR) - } - return ret, pos + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + switch c := src[pos]; c { + case 'n': + ret = decodeNull(src, pos) + case '"': + ret, _ = skipString(src, pos) + case '{': + ret = skipPair(src, pos, '{', '}') + case '[': + ret = skipPair(src, pos, '[', ']') + case 't': + ret = decodeTrue(src, pos) + case 'f': + ret = decodeFalse(src, pos) + case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + ret = skipNumber(src, pos) + default: + ret = -int(types.ERR_INVALID_CHAR) + } + return ret, pos } func skipValue(src string, pos int) (ret int, start int) { - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - switch c := src[pos]; c { - case 'n': - ret = decodeNull(src, pos) - case '"': - ret, _ = skipString(src, pos) - case '{': - ret, _ = skipObject(src, pos) - case '[': - ret, _ = skipArray(src, pos) - case 't': - ret = decodeTrue(src, pos) - case 'f': - ret = decodeFalse(src, pos) - case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - ret = skipNumber(src, pos) - default: - ret = -int(types.ERR_INVALID_CHAR) - } - return ret, pos + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + switch c := src[pos]; c { + case 'n': + ret = decodeNull(src, pos) + case '"': + ret, _ = skipString(src, pos) + case '{': 
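skipPair above is the workhorse of skipValueFast: it finds the closing '}' or ']' by depth counting alone, skipping escape sequences wholesale and suspending the count inside string literals, without validating what it skips over. A safe standalone sketch of that scan (the original walks raw pointers):

package sketch

// skipPair returns the index just past the rchar matching src[pos], or -1
// on malformed input. Escapes advance two bytes, quotes toggle string mode,
// and the depth counter only moves outside strings.
func skipPair(src string, pos int, lchar, rchar byte) int {
	if pos >= len(src) || src[pos] != lchar {
		return -1
	}
	depth, inquote := 1, false
	for i := pos + 1; i < len(src); i++ {
		switch c := src[i]; {
		case c == '\\':
			i++ // skip the escaped byte
		case c == '"':
			inquote = !inquote
		case c == lchar && !inquote:
			depth++
		case c == rchar && !inquote:
			depth--
			if depth == 0 {
				return i + 1
			}
		}
	}
	return -1
}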
+ ret, _ = skipObject(src, pos) + case '[': + ret, _ = skipArray(src, pos) + case 't': + ret = decodeTrue(src, pos) + case 'f': + ret = decodeFalse(src, pos) + case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + ret = skipNumber(src, pos) + default: + ret = -int(types.ERR_INVALID_CHAR) + } + return ret, pos } func skipObject(src string, pos int) (ret int, start int) { - start = skipBlank(src, pos) - if start < 0 { - return start, -1 - } - - if src[start] != '{' { - return -int(types.ERR_INVALID_CHAR), -1 - } - - pos = start + 1 - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - if src[pos] == '}' { - return pos + 1, start - } - - for { - pos, _ = skipString(src, pos) - if pos < 0 { - return pos, -1 - } - - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - if src[pos] != ':' { - return -int(types.ERR_INVALID_CHAR), -1 - } - - pos++ - pos, _ = skipValue(src, pos) - if pos < 0 { - return pos, -1 - } - - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - if src[pos] == '}' { - return pos + 1, start - } - if src[pos] != ',' { - return -int(types.ERR_INVALID_CHAR), -1 - } - - pos++ - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - - } + start = skipBlank(src, pos) + if start < 0 { + return start, -1 + } + + if src[start] != '{' { + return -int(types.ERR_INVALID_CHAR), -1 + } + + pos = start + 1 + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + if src[pos] == '}' { + return pos + 1, start + } + + for { + pos, _ = skipString(src, pos) + if pos < 0 { + return pos, -1 + } + + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + if src[pos] != ':' { + return -int(types.ERR_INVALID_CHAR), -1 + } + + pos++ + pos, _ = skipValue(src, pos) + if pos < 0 { + return pos, -1 + } + + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + if src[pos] == '}' { + return pos + 1, start + } + if src[pos] != ',' { + return -int(types.ERR_INVALID_CHAR), -1 + } + + pos++ + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + + } } func skipArray(src string, pos int) (ret int, start int) { - start = skipBlank(src, pos) - if start < 0 { - return start, -1 - } - - if src[start] != '[' { - return -int(types.ERR_INVALID_CHAR), -1 - } - - pos = start + 1 - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - if src[pos] == ']' { - return pos + 1, start - } - - for { - pos, _ = skipValue(src, pos) - if pos < 0 { - return pos, -1 - } - - pos = skipBlank(src, pos) - if pos < 0 { - return pos, -1 - } - if src[pos] == ']' { - return pos + 1, start - } - if src[pos] != ',' { - return -int(types.ERR_INVALID_CHAR), -1 - } - pos++ - } + start = skipBlank(src, pos) + if start < 0 { + return start, -1 + } + + if src[start] != '[' { + return -int(types.ERR_INVALID_CHAR), -1 + } + + pos = start + 1 + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + if src[pos] == ']' { + return pos + 1, start + } + + for { + pos, _ = skipValue(src, pos) + if pos < 0 { + return pos, -1 + } + + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + if src[pos] == ']' { + return pos + 1, start + } + if src[pos] != ',' { + return -int(types.ERR_INVALID_CHAR), -1 + } + pos++ + } } diff --git a/vendor/github.com/bytedance/sonic/ast/encode.go b/vendor/github.com/bytedance/sonic/ast/encode.go index 1187e30c2..21b5f3b10 100644 --- a/vendor/github.com/bytedance/sonic/ast/encode.go +++ b/vendor/github.com/bytedance/sonic/ast/encode.go @@ -17,243 +17,254 @@ package ast import ( - `sync` - `unicode/utf8` + "sync" 
+ "unicode/utf8" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) const ( - _MaxBuffer = 1024 // 1KB buffer size + _MaxBuffer = 1024 // 1KB buffer size ) func quoteString(e *[]byte, s string) { - *e = append(*e, '"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - *e = append(*e, s[start:i]...) - } - *e = append(*e, '\\') - switch b { - case '\\', '"': - *e = append(*e, b) - case '\n': - *e = append(*e, 'n') - case '\r': - *e = append(*e, 'r') - case '\t': - *e = append(*e, 't') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - // If escapeHTML is set, it also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - *e = append(*e, `u00`...) - *e = append(*e, hex[b>>4]) - *e = append(*e, hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - // if c == utf8.RuneError && size == 1 { - // if start < i { - // e.Write(s[start:i]) - // } - // e.WriteString(`\ufffd`) - // i += size - // start = i - // continue - // } - if c == '\u2028' || c == '\u2029' { - if start < i { - *e = append(*e, s[start:i]...) - } - *e = append(*e, `\u202`...) - *e = append(*e, hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - *e = append(*e, s[start:]...) - } - *e = append(*e, '"') + *e = append(*e, '"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + *e = append(*e, s[start:i]...) + } + *e = append(*e, '\\') + switch b { + case '\\', '"': + *e = append(*e, b) + case '\n': + *e = append(*e, 'n') + case '\r': + *e = append(*e, 'r') + case '\t': + *e = append(*e, 't') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. + // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + *e = append(*e, `u00`...) + *e = append(*e, hex[b>>4]) + *e = append(*e, hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + // if c == utf8.RuneError && size == 1 { + // if start < i { + // e.Write(s[start:i]) + // } + // e.WriteString(`\ufffd`) + // i += size + // start = i + // continue + // } + if c == '\u2028' || c == '\u2029' { + if start < i { + *e = append(*e, s[start:i]...) + } + *e = append(*e, `\u202`...) + *e = append(*e, hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + *e = append(*e, s[start:]...) 
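quoteString above keeps essentially the escaping policy of encoding/json: bytes in the safeSet table pass through untouched; quotes, backslashes, and control bytes are escaped; and U+2028/U+2029 are special-cased because, although legal in JSON, they are line terminators in JavaScript and break naive script embedding. A sketch of the predicate side of that policy (needsEscape is an illustrative helper, assuming a safeSet like encoding/json's):

package sketch

// needsEscape captures which runes quoteString rewrites.
func needsEscape(r rune) bool {
	switch {
	case r == '"' || r == '\\':
		return true
	case r < 0x20: // controls become \n, \r, \t, or \u00XX
		return true
	case r == '\u2028' || r == '\u2029': // JS line separators
		return true
	default:
		return false
	}
}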
+ } + *e = append(*e, '"') } -var bytesPool = sync.Pool{} +var bytesPool = sync.Pool{} func (self *Node) MarshalJSON() ([]byte, error) { - buf := newBuffer() - err := self.encode(buf) - if err != nil { - freeBuffer(buf) - return nil, err - } + buf := newBuffer() + err := self.encode(buf) + if err != nil { + freeBuffer(buf) + return nil, err + } - ret := make([]byte, len(*buf)) - copy(ret, *buf) - freeBuffer(buf) - return ret, err + ret := make([]byte, len(*buf)) + copy(ret, *buf) + freeBuffer(buf) + return ret, err } func newBuffer() *[]byte { - if ret := bytesPool.Get(); ret != nil { - return ret.(*[]byte) - } else { - buf := make([]byte, 0, _MaxBuffer) - return &buf - } + if ret := bytesPool.Get(); ret != nil { + return ret.(*[]byte) + } else { + buf := make([]byte, 0, _MaxBuffer) + return &buf + } } func freeBuffer(buf *[]byte) { - *buf = (*buf)[:0] - bytesPool.Put(buf) + *buf = (*buf)[:0] + bytesPool.Put(buf) } func (self *Node) encode(buf *[]byte) error { - if self.IsRaw() { - return self.encodeRaw(buf) - } - switch self.Type() { - case V_NONE : return ErrNotExist - case V_ERROR : return self.Check() - case V_NULL : return self.encodeNull(buf) - case V_TRUE : return self.encodeTrue(buf) - case V_FALSE : return self.encodeFalse(buf) - case V_ARRAY : return self.encodeArray(buf) - case V_OBJECT: return self.encodeObject(buf) - case V_STRING: return self.encodeString(buf) - case V_NUMBER: return self.encodeNumber(buf) - case V_ANY : return self.encodeInterface(buf) - default : return ErrUnsupportType - } + if self.IsRaw() { + return self.encodeRaw(buf) + } + switch self.Type() { + case V_NONE: + return ErrNotExist + case V_ERROR: + return self.Check() + case V_NULL: + return self.encodeNull(buf) + case V_TRUE: + return self.encodeTrue(buf) + case V_FALSE: + return self.encodeFalse(buf) + case V_ARRAY: + return self.encodeArray(buf) + case V_OBJECT: + return self.encodeObject(buf) + case V_STRING: + return self.encodeString(buf) + case V_NUMBER: + return self.encodeNumber(buf) + case V_ANY: + return self.encodeInterface(buf) + default: + return ErrUnsupportType + } } func (self *Node) encodeRaw(buf *[]byte) error { - raw, err := self.Raw() - if err != nil { - return err - } - *buf = append(*buf, raw...) - return nil + raw, err := self.Raw() + if err != nil { + return err + } + *buf = append(*buf, raw...) + return nil } func (self *Node) encodeNull(buf *[]byte) error { - *buf = append(*buf, bytesNull...) - return nil + *buf = append(*buf, bytesNull...) + return nil } func (self *Node) encodeTrue(buf *[]byte) error { - *buf = append(*buf, bytesTrue...) - return nil + *buf = append(*buf, bytesTrue...) + return nil } func (self *Node) encodeFalse(buf *[]byte) error { - *buf = append(*buf, bytesFalse...) - return nil + *buf = append(*buf, bytesFalse...) + return nil } func (self *Node) encodeNumber(buf *[]byte) error { - str := rt.StrFrom(self.p, self.v) - *buf = append(*buf, str...) - return nil + str := rt.StrFrom(self.p, self.v) + *buf = append(*buf, str...) 
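MarshalJSON above rents its scratch buffer from bytesPool and copies the result out before returning the buffer; pooling *[]byte rather than []byte keeps Get and Put from allocating to box the slice header in an interface. A minimal sketch of that rent-encode-copy-return cycle, with an explicit New for clarity (the original lazily allocates in newBuffer instead; withBuffer is an illustrative name):

package sketch

import "sync"

var bufPool = sync.Pool{
	New: func() interface{} { b := make([]byte, 0, 1024); return &b },
}

// withBuffer rents a pooled buffer, runs encode into it, and returns a
// private copy so the pooled buffer can be reused immediately.
func withBuffer(encode func(*[]byte) error) ([]byte, error) {
	buf := bufPool.Get().(*[]byte)
	defer func() {
		*buf = (*buf)[:0] // keep capacity, drop contents
		bufPool.Put(buf)
	}()
	if err := encode(buf); err != nil {
		return nil, err
	}
	out := make([]byte, len(*buf))
	copy(out, *buf)
	return out, nil
}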
+ return nil } func (self *Node) encodeString(buf *[]byte) error { - if self.v == 0 { - *buf = append(*buf, '"', '"') - return nil - } + if self.v == 0 { + *buf = append(*buf, '"', '"') + return nil + } - quote(buf, rt.StrFrom(self.p, self.v)) - return nil + quote(buf, rt.StrFrom(self.p, self.v)) + return nil } func (self *Node) encodeArray(buf *[]byte) error { - if self.isLazy() { - if err := self.skipAllIndex(); err != nil { - return err - } - } + if self.isLazy() { + if err := self.skipAllIndex(); err != nil { + return err + } + } - nb := self.len() - if nb == 0 { - *buf = append(*buf, bytesArray...) - return nil - } - - *buf = append(*buf, '[') + nb := self.len() + if nb == 0 { + *buf = append(*buf, bytesArray...) + return nil + } - var p = (*Node)(self.p) - err := p.encode(buf) - if err != nil { - return err - } - for i := 1; i < nb; i++ { - *buf = append(*buf, ',') - p = p.unsafe_next() - err := p.encode(buf) - if err != nil { - return err - } - } + *buf = append(*buf, '[') - *buf = append(*buf, ']') - return nil + var p = (*Node)(self.p) + err := p.encode(buf) + if err != nil { + return err + } + for i := 1; i < nb; i++ { + *buf = append(*buf, ',') + p = p.unsafe_next() + err := p.encode(buf) + if err != nil { + return err + } + } + + *buf = append(*buf, ']') + return nil } func (self *Pair) encode(buf *[]byte) error { - if len(*buf) == 0 { - *buf = append(*buf, '"', '"', ':') - return self.Value.encode(buf) - } + if len(*buf) == 0 { + *buf = append(*buf, '"', '"', ':') + return self.Value.encode(buf) + } - quote(buf, self.Key) - *buf = append(*buf, ':') + quote(buf, self.Key) + *buf = append(*buf, ':') - return self.Value.encode(buf) + return self.Value.encode(buf) } func (self *Node) encodeObject(buf *[]byte) error { - if self.isLazy() { - if err := self.skipAllKey(); err != nil { - return err - } - } - - nb := self.len() - if nb == 0 { - *buf = append(*buf, bytesObject...) - return nil - } - - *buf = append(*buf, '{') + if self.isLazy() { + if err := self.skipAllKey(); err != nil { + return err + } + } + + nb := self.len() + if nb == 0 { + *buf = append(*buf, bytesObject...) 
+ return nil + } - var p = (*Pair)(self.p) - err := p.encode(buf) - if err != nil { - return err - } - for i := 1; i < nb; i++ { - *buf = append(*buf, ',') - p = p.unsafe_next() - err := p.encode(buf) - if err != nil { - return err - } - } + *buf = append(*buf, '{') - *buf = append(*buf, '}') - return nil -} \ No newline at end of file + var p = (*Pair)(self.p) + err := p.encode(buf) + if err != nil { + return err + } + for i := 1; i < nb; i++ { + *buf = append(*buf, ',') + p = p.unsafe_next() + err := p.encode(buf) + if err != nil { + return err + } + } + + *buf = append(*buf, '}') + return nil +} diff --git a/vendor/github.com/bytedance/sonic/ast/error.go b/vendor/github.com/bytedance/sonic/ast/error.go index f4c441ae6..f994368fc 100644 --- a/vendor/github.com/bytedance/sonic/ast/error.go +++ b/vendor/github.com/bytedance/sonic/ast/error.go @@ -1,98 +1,98 @@ package ast import ( - `fmt` - `strings` - `unsafe` + "fmt" + "strings" + "unsafe" - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" ) func (self *Parser) syntaxError(err types.ParsingError) SyntaxError { - return SyntaxError{ - Pos : self.p, - Src : self.s, - Code: err, - } + return SyntaxError{ + Pos: self.p, + Src: self.s, + Code: err, + } } func newSyntaxError(err SyntaxError) *Node { - msg := err.Description() - return &Node{ - t: V_ERROR, - v: int64(err.Code), - p: unsafe.Pointer(&msg), - } + msg := err.Description() + return &Node{ + t: V_ERROR, + v: int64(err.Code), + p: unsafe.Pointer(&msg), + } } type SyntaxError struct { - Pos int - Src string - Code types.ParsingError - Msg string + Pos int + Src string + Code types.ParsingError + Msg string } func (self SyntaxError) Error() string { - return fmt.Sprintf("%q", self.Description()) + return fmt.Sprintf("%q", self.Description()) } func (self SyntaxError) Description() string { - return "Syntax error " + self.description() + return "Syntax error " + self.description() } func (self SyntaxError) description() string { - i := 16 - p := self.Pos - i - q := self.Pos + i - - /* check for empty source */ - if self.Src == "" { - return fmt.Sprintf("no sources available: %#v", self) - } - - /* prevent slicing before the beginning */ - if p < 0 { - p, q, i = 0, q - p, i + p - } - - /* prevent slicing beyond the end */ - if n := len(self.Src); q > n { - n = q - n - q = len(self.Src) - - /* move the left bound if possible */ - if p > n { - i += n - p -= n - } - } - - /* left and right length */ - x := clamp_zero(i) - y := clamp_zero(q - p - i - 1) - - /* compose the error description */ - return fmt.Sprintf( - "at index %d: %s\n\n\t%s\n\t%s^%s\n", - self.Pos, - self.Message(), - self.Src[p:q], - strings.Repeat(".", x), - strings.Repeat(".", y), - ) + i := 16 + p := self.Pos - i + q := self.Pos + i + + /* check for empty source */ + if self.Src == "" { + return fmt.Sprintf("no sources available: %#v", self) + } + + /* prevent slicing before the beginning */ + if p < 0 { + p, q, i = 0, q-p, i+p + } + + /* prevent slicing beyond the end */ + if n := len(self.Src); q > n { + n = q - n + q = len(self.Src) + + /* move the left bound if possible */ + if p > n { + i += n + p -= n + } + } + + /* left and right length */ + x := clamp_zero(i) + y := clamp_zero(q - p - i - 1) + + /* compose the error description */ + return fmt.Sprintf( + "at index %d: %s\n\n\t%s\n\t%s^%s\n", + self.Pos, + self.Message(), + self.Src[p:q], + strings.Repeat(".", x), + strings.Repeat(".", y), + ) } func (self SyntaxError) Message() string { - if self.Msg == "" { - 
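description() above prints a context window of roughly 16 bytes on either side of the failing position, clamping the bounds at both ends of the source and sliding the window when one side runs short, then underlines the position with a caret. A simplified sketch that keeps the clamping but skips the sliding (window is an illustrative name):

package sketch

import (
	"fmt"
	"strings"
)

// window renders src around pos with a caret, clamped to the source bounds.
func window(src string, pos int) string {
	lo, hi := pos-16, pos+16
	if lo < 0 {
		lo = 0
	}
	if hi > len(src) {
		hi = len(src)
	}
	return fmt.Sprintf("%s\n%s^", src[lo:hi], strings.Repeat(".", pos-lo))
}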
return self.Code.Message() - } - return self.Msg + if self.Msg == "" { + return self.Code.Message() + } + return self.Msg } func clamp_zero(v int) int { - if v < 0 { - return 0 - } else { - return v - } + if v < 0 { + return 0 + } else { + return v + } } diff --git a/vendor/github.com/bytedance/sonic/ast/iterator.go b/vendor/github.com/bytedance/sonic/ast/iterator.go index 03a25b4e9..06446252a 100644 --- a/vendor/github.com/bytedance/sonic/ast/iterator.go +++ b/vendor/github.com/bytedance/sonic/ast/iterator.go @@ -17,148 +17,148 @@ package ast import ( - `fmt` + "fmt" - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" ) type Pair struct { - Key string - Value Node + Key string + Value Node } // Values returns iterator for array's children traversal func (self *Node) Values() (ListIterator, error) { - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return ListIterator{}, err - } - return ListIterator{Iterator{p: self}}, nil + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return ListIterator{}, err + } + return ListIterator{Iterator{p: self}}, nil } // Properties returns iterator for object's children traversal func (self *Node) Properties() (ObjectIterator, error) { - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return ObjectIterator{}, err - } - return ObjectIterator{Iterator{p: self}}, nil + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return ObjectIterator{}, err + } + return ObjectIterator{Iterator{p: self}}, nil } type Iterator struct { - i int - p *Node + i int + p *Node } func (self *Iterator) Pos() int { - return self.i + return self.i } func (self *Iterator) Len() int { - return self.p.len() + return self.p.len() } // HasNext reports if it is the end of iteration or has error. func (self *Iterator) HasNext() bool { - if !self.p.isLazy() { - return self.p.Valid() && self.i < self.p.len() - } else if self.p.t == _V_ARRAY_LAZY { - return self.p.skipNextNode().Valid() - } else if self.p.t == _V_OBJECT_LAZY { - pair := self.p.skipNextPair() - if pair == nil { - return false - } - return pair.Value.Valid() - } - return false + if !self.p.isLazy() { + return self.p.Valid() && self.i < self.p.len() + } else if self.p.t == _V_ARRAY_LAZY { + return self.p.skipNextNode().Valid() + } else if self.p.t == _V_OBJECT_LAZY { + pair := self.p.skipNextPair() + if pair == nil { + return false + } + return pair.Value.Valid() + } + return false } // ListIterator is specialized iterator for V_ARRAY type ListIterator struct { - Iterator + Iterator } // ObjectIterator is specialized iterator for V_ARRAY type ObjectIterator struct { - Iterator + Iterator } -// Next scans through children of underlying V_ARRAY, +// Next scans through children of underlying V_ARRAY, // copies each child to v, and returns .HasNext(). func (self *ListIterator) Next(v *Node) bool { - if !self.HasNext() { - return false - } else { - *v, self.i = *self.p.nodeAt(self.i), self.i + 1 - return true - } + if !self.HasNext() { + return false + } else { + *v, self.i = *self.p.nodeAt(self.i), self.i+1 + return true + } } -// Next scans through children of underlying V_OBJECT, +// Next scans through children of underlying V_OBJECT, // copies each child to v, and returns .HasNext(). 
func (self *ObjectIterator) Next(p *Pair) bool { - if !self.HasNext() { - return false - } else { - *p, self.i = *self.p.pairAt(self.i), self.i + 1 - return true - } + if !self.HasNext() { + return false + } else { + *p, self.i = *self.p.pairAt(self.i), self.i+1 + return true + } } // Sequence represents scanning path of single-layer nodes. // Index indicates the value's order in both V_ARRAY and V_OBJECT json. // Key is the value's key (for V_OBJECT json only, otherwise it will be nil). type Sequence struct { - Index int - Key *string - // Level int + Index int + Key *string + // Level int } // String is string representation of one Sequence func (s Sequence) String() string { - k := "" - if s.Key != nil { - k = *s.Key - } - return fmt.Sprintf("Sequence(%d, %q)", s.Index, k) + k := "" + if s.Key != nil { + k = *s.Key + } + return fmt.Sprintf("Sequence(%d, %q)", s.Index, k) } type Scanner func(path Sequence, node *Node) bool -// ForEach scans one V_OBJECT node's children from JSON head to tail, +// ForEach scans one V_OBJECT node's children from JSON head to tail, // and pass the Sequence and Node of corresponding JSON value. // -// Especailly, if the node is not V_ARRAY or V_OBJECT, +// Especailly, if the node is not V_ARRAY or V_OBJECT, // the node itself will be returned and Sequence.Index == -1. func (self *Node) ForEach(sc Scanner) error { - switch self.itype() { - case types.V_ARRAY: - ns, err := self.UnsafeArray() - if err != nil { - return err - } - for i := range ns { - if !sc(Sequence{i, nil}, &ns[i]) { - return err - } - } - case types.V_OBJECT: - ns, err := self.UnsafeMap() - if err != nil { - return err - } - for i := range ns { - if !sc(Sequence{i, &ns[i].Key}, &ns[i].Value) { - return err - } - } - default: - sc(Sequence{-1, nil}, self) - } - return self.Check() + switch self.itype() { + case types.V_ARRAY: + ns, err := self.UnsafeArray() + if err != nil { + return err + } + for i := range ns { + if !sc(Sequence{i, nil}, &ns[i]) { + return err + } + } + case types.V_OBJECT: + ns, err := self.UnsafeMap() + if err != nil { + return err + } + for i := range ns { + if !sc(Sequence{i, &ns[i].Key}, &ns[i].Value) { + return err + } + } + default: + sc(Sequence{-1, nil}, self) + } + return self.Check() } type PairSlice []Pair func (self PairSlice) Sort() { - radixQsort(self, 0, maxDepth(len(self))) -} \ No newline at end of file + radixQsort(self, 0, maxDepth(len(self))) +} diff --git a/vendor/github.com/bytedance/sonic/ast/node.go b/vendor/github.com/bytedance/sonic/ast/node.go index 6b5ad8a3e..dbe2d52b4 100644 --- a/vendor/github.com/bytedance/sonic/ast/node.go +++ b/vendor/github.com/bytedance/sonic/ast/node.go @@ -17,493 +17,617 @@ package ast import ( - `encoding/json` - `fmt` - `strconv` - `unsafe` - `reflect` - - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "encoding/json" + "fmt" + "reflect" + "strconv" + "unsafe" + + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) const ( - _CAP_BITS = 32 - _LEN_MASK = 1 << _CAP_BITS - 1 + _CAP_BITS = 32 + _LEN_MASK = 1<<_CAP_BITS - 1 - _NODE_SIZE = unsafe.Sizeof(Node{}) - _PAIR_SIZE = unsafe.Sizeof(Pair{}) + _NODE_SIZE = unsafe.Sizeof(Node{}) + _PAIR_SIZE = unsafe.Sizeof(Pair{}) ) const ( - _V_NONE types.ValueType = 0 - _V_NODE_BASE types.ValueType = 1 << 5 - _V_LAZY types.ValueType = 1 << 7 - _V_RAW types.ValueType = 1 << 8 - _V_NUMBER = _V_NODE_BASE + 1 - _V_ANY = _V_NODE_BASE + 2 - _V_ARRAY_LAZY = _V_LAZY | types.V_ARRAY - _V_OBJECT_LAZY = 
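ForEach above is the cheapest way to walk one layer of a document: objects report each child's key through Sequence.Key, arrays leave it nil, and returning false from the Scanner stops the walk. A usage sketch against the public API (illustrative input):

package main

import (
	"fmt"

	"github.com/bytedance/sonic/ast"
)

func main() {
	root, err := ast.NewSearcher(`{"a":1,"b":[2,3]}`).GetByPath()
	if err != nil {
		panic(err)
	}
	_ = root.ForEach(func(path ast.Sequence, node *ast.Node) bool {
		raw, _ := node.Raw()
		fmt.Println(path.String(), "=>", raw)
		return true // false would stop the scan early
	})
}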
_V_LAZY | types.V_OBJECT - _MASK_LAZY = _V_LAZY - 1 - _MASK_RAW = _V_RAW - 1 + _V_NONE types.ValueType = 0 + _V_NODE_BASE types.ValueType = 1 << 5 + _V_LAZY types.ValueType = 1 << 7 + _V_RAW types.ValueType = 1 << 8 + _V_NUMBER = _V_NODE_BASE + 1 + _V_ANY = _V_NODE_BASE + 2 + _V_ARRAY_LAZY = _V_LAZY | types.V_ARRAY + _V_OBJECT_LAZY = _V_LAZY | types.V_OBJECT + _MASK_LAZY = _V_LAZY - 1 + _MASK_RAW = _V_RAW - 1 ) const ( - V_NONE = 0 - V_ERROR = 1 - V_NULL = 2 - V_TRUE = 3 - V_FALSE = 4 - V_ARRAY = 5 - V_OBJECT = 6 - V_STRING = 7 - V_NUMBER = int(_V_NUMBER) - V_ANY = int(_V_ANY) + V_NONE = 0 + V_ERROR = 1 + V_NULL = 2 + V_TRUE = 3 + V_FALSE = 4 + V_ARRAY = 5 + V_OBJECT = 6 + V_STRING = 7 + V_NUMBER = int(_V_NUMBER) + V_ANY = int(_V_ANY) ) var ( - byteType = rt.UnpackType(reflect.TypeOf(byte(0))) + byteType = rt.UnpackType(reflect.TypeOf(byte(0))) ) type Node struct { - v int64 - t types.ValueType - p unsafe.Pointer + v int64 + t types.ValueType + p unsafe.Pointer } // UnmarshalJSON is just an adapter to json.Unmarshaler. // If you want better performance, use Searcher.GetByPath() directly func (self *Node) UnmarshalJSON(data []byte) (err error) { - *self, err = NewSearcher(string(data)).GetByPath() - return + *self, err = NewSearcher(string(data)).GetByPath() + return } /** Node Type Accessor **/ // Type returns json type represented by the node // It will be one of belows: -// V_NONE = 0 (empty node) -// V_ERROR = 1 (error node) -// V_NULL = 2 (json value `null`) -// V_TRUE = 3 (json value `true`) -// V_FALSE = 4 (json value `false`) -// V_ARRAY = 5 (json value array) -// V_OBJECT = 6 (json value object) -// V_STRING = 7 (json value string) -// V_NUMBER = 33 (json value number ) -// V_ANY = 34 (golang interface{}) +// +// V_NONE = 0 (empty node) +// V_ERROR = 1 (error node) +// V_NULL = 2 (json value `null`) +// V_TRUE = 3 (json value `true`) +// V_FALSE = 4 (json value `false`) +// V_ARRAY = 5 (json value array) +// V_OBJECT = 6 (json value object) +// V_STRING = 7 (json value string) +// V_NUMBER = 33 (json value number ) +// V_ANY = 34 (golang interface{}) func (self Node) Type() int { - return int(self.t & _MASK_LAZY & _MASK_RAW) + return int(self.t & _MASK_LAZY & _MASK_RAW) } func (self Node) itype() types.ValueType { - return self.t & _MASK_LAZY & _MASK_RAW + return self.t & _MASK_LAZY & _MASK_RAW } // Exists returns false only if the self is nil or empty node V_NONE func (self *Node) Exists() bool { - return self != nil && self.t != _V_NONE + return self != nil && self.t != _V_NONE } // Valid reports if self is NOT V_ERROR or nil func (self *Node) Valid() bool { - if self == nil { - return false - } - return self.t != V_ERROR + if self == nil { + return false + } + return self.t != V_ERROR } // Check checks if the node itself is valid, and return: // - ErrNotFound If the node is nil // - Its underlying error If the node is V_ERROR -func (self *Node) Check() error { - if self == nil { - return ErrNotExist - } else if self.t != V_ERROR { - return nil - } else { - return self - } +func (self *Node) Check() error { + if self == nil { + return ErrNotExist + } else if self.t != V_ERROR { + return nil + } else { + return self + } } // Error returns error message if the node is invalid func (self Node) Error() string { - if self.t != V_ERROR { - return "" - } else { - return *(*string)(self.p) - } + if self.t != V_ERROR { + return "" + } else { + return *(*string)(self.p) + } } // IsRaw returns true if node's underlying value is raw json func (self Node) IsRaw() bool { - return self.t&_V_RAW 
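The constants above pack orthogonal flags on top of the base JSON type: _V_LAZY (bit 7) marks containers whose children are not yet parsed, _V_RAW (bit 8) marks nodes still holding raw JSON text, and Type() strips both with the _MASK_* values. A sketch of the same masking (names are illustrative):

package sketch

const (
	vLazy = 1 << 7 // children not yet parsed
	vRaw  = 1 << 8 // value still raw JSON text
)

// plainType mirrors Node.Type(): mask off the lazy and raw flag bits,
// keeping only the base JSON value type.
func plainType(t int) int {
	return t & (vLazy - 1) & (vRaw - 1)
}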
!= 0 + return self.t&_V_RAW != 0 } func (self *Node) isLazy() bool { - return self != nil && self.t&_V_LAZY != 0 + return self != nil && self.t&_V_LAZY != 0 } func (self *Node) isAny() bool { - return self != nil && self.t == _V_ANY + return self != nil && self.t == _V_ANY } /** Simple Value Methods **/ // Raw returns json representation of the node, func (self *Node) Raw() (string, error) { - if !self.IsRaw() { - buf, err := self.MarshalJSON() - return rt.Mem2Str(buf), err - } - return rt.StrFrom(self.p, self.v), nil + if !self.IsRaw() { + buf, err := self.MarshalJSON() + return rt.Mem2Str(buf), err + } + return rt.StrFrom(self.p, self.v), nil } func (self *Node) checkRaw() error { - if err := self.Check(); err != nil { - return err - } - if self.IsRaw() { - self.parseRaw(false) - } - return nil + if err := self.Check(); err != nil { + return err + } + if self.IsRaw() { + self.parseRaw(false) + } + return nil } -// Bool returns bool value represented by this node, -// including types.V_TRUE|V_FALSE|V_NUMBER|V_STRING|V_ANY|V_NULL, +// Bool returns bool value represented by this node, +// including types.V_TRUE|V_FALSE|V_NUMBER|V_STRING|V_ANY|V_NULL, // V_NONE will return error func (self *Node) Bool() (bool, error) { - if err := self.checkRaw(); err != nil { - return false, err - } - switch self.t { - case types.V_TRUE : return true , nil - case types.V_FALSE : return false, nil - case types.V_NULL : return false, nil - case _V_NUMBER : - if i, err := numberToInt64(self); err == nil { - return i != 0, nil - } else if f, err := numberToFloat64(self); err == nil { - return f != 0, nil - } else { - return false, err - } - case types.V_STRING: return strconv.ParseBool(rt.StrFrom(self.p, self.v)) - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case bool : return v, nil - case int : return v != 0, nil - case int8 : return v != 0, nil - case int16 : return v != 0, nil - case int32 : return v != 0, nil - case int64 : return v != 0, nil - case uint : return v != 0, nil - case uint8 : return v != 0, nil - case uint16 : return v != 0, nil - case uint32 : return v != 0, nil - case uint64 : return v != 0, nil - case float32: return v != 0, nil - case float64: return v != 0, nil - case string : return strconv.ParseBool(v) - case json.Number: - if i, err := v.Int64(); err == nil { - return i != 0, nil - } else if f, err := v.Float64(); err == nil { - return f != 0, nil - } else { - return false, err - } - default: return false, ErrUnsupportType - } - default : return false, ErrUnsupportType - } -} - -// Int64 casts the node to int64 value, + if err := self.checkRaw(); err != nil { + return false, err + } + switch self.t { + case types.V_TRUE: + return true, nil + case types.V_FALSE: + return false, nil + case types.V_NULL: + return false, nil + case _V_NUMBER: + if i, err := numberToInt64(self); err == nil { + return i != 0, nil + } else if f, err := numberToFloat64(self); err == nil { + return f != 0, nil + } else { + return false, err + } + case types.V_STRING: + return strconv.ParseBool(rt.StrFrom(self.p, self.v)) + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case bool: + return v, nil + case int: + return v != 0, nil + case int8: + return v != 0, nil + case int16: + return v != 0, nil + case int32: + return v != 0, nil + case int64: + return v != 0, nil + case uint: + return v != 0, nil + case uint8: + return v != 0, nil + case uint16: + return v != 0, nil + case uint32: + return v != 0, nil + case uint64: + return v != 0, nil + case float32: + return v != 
0, nil + case float64: + return v != 0, nil + case string: + return strconv.ParseBool(v) + case json.Number: + if i, err := v.Int64(); err == nil { + return i != 0, nil + } else if f, err := v.Float64(); err == nil { + return f != 0, nil + } else { + return false, err + } + default: + return false, ErrUnsupportType + } + default: + return false, ErrUnsupportType + } +} + +// Int64 casts the node to int64 value, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING // V_NONE it will return error func (self *Node) Int64() (int64, error) { - if err := self.checkRaw(); err != nil { - return 0, err - } - switch self.t { - case _V_NUMBER, types.V_STRING : - if i, err := numberToInt64(self); err == nil { - return i, nil - } else if f, err := numberToFloat64(self); err == nil { - return int64(f), nil - } else { - return 0, err - } - case types.V_TRUE : return 1, nil - case types.V_FALSE : return 0, nil - case types.V_NULL : return 0, nil - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case bool : if v { return 1, nil } else { return 0, nil } - case int : return int64(v), nil - case int8 : return int64(v), nil - case int16 : return int64(v), nil - case int32 : return int64(v), nil - case int64 : return int64(v), nil - case uint : return int64(v), nil - case uint8 : return int64(v), nil - case uint16 : return int64(v), nil - case uint32 : return int64(v), nil - case uint64 : return int64(v), nil - case float32: return int64(v), nil - case float64: return int64(v), nil - case string : - if i, err := strconv.ParseInt(v, 10, 64); err == nil { - return i, nil - } else if f, err := strconv.ParseFloat(v, 64); err == nil { - return int64(f), nil - } else { - return 0, err - } - case json.Number: - if i, err := v.Int64(); err == nil { - return i, nil - } else if f, err := v.Float64(); err == nil { - return int64(f), nil - } else { - return 0, err - } - default: return 0, ErrUnsupportType - } - default : return 0, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return 0, err + } + switch self.t { + case _V_NUMBER, types.V_STRING: + if i, err := numberToInt64(self); err == nil { + return i, nil + } else if f, err := numberToFloat64(self); err == nil { + return int64(f), nil + } else { + return 0, err + } + case types.V_TRUE: + return 1, nil + case types.V_FALSE: + return 0, nil + case types.V_NULL: + return 0, nil + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case bool: + if v { + return 1, nil + } else { + return 0, nil + } + case int: + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return int64(v), nil + case uint: + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case uint64: + return int64(v), nil + case float32: + return int64(v), nil + case float64: + return int64(v), nil + case string: + if i, err := strconv.ParseInt(v, 10, 64); err == nil { + return i, nil + } else if f, err := strconv.ParseFloat(v, 64); err == nil { + return int64(f), nil + } else { + return 0, err + } + case json.Number: + if i, err := v.Int64(); err == nil { + return i, nil + } else if f, err := v.Float64(); err == nil { + return int64(f), nil + } else { + return 0, err + } + default: + return 0, ErrUnsupportType + } + default: + return 0, ErrUnsupportType + } } // StrictInt64 exports underlying int64 value, including V_NUMBER, V_ANY func (self *Node) StrictInt64() (int64, error) { - if 
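Int64 above is deliberately lenient: it coerces V_STRING through strconv, maps true/false/null to 1/0/0, and truncates floats, while the StrictInt64 variant that follows accepts only genuine numbers. A hedged contrast (illustrative input; the exact error values follow from the switch arms above):

package main

import (
	"fmt"

	"github.com/bytedance/sonic/ast"
)

func main() {
	n, err := ast.NewSearcher(`{"id":"42"}`).GetByPath("id")
	if err != nil {
		panic(err)
	}
	v, err := n.Int64() // lenient: the string "42" should parse to 42
	fmt.Println(v, err)
	s, serr := n.StrictInt64() // strict: V_STRING is rejected
	fmt.Println(s, serr)       // 0 and a non-nil unsupported-type error
}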
err := self.checkRaw(); err != nil { - return 0, err - } - switch self.t { - case _V_NUMBER : return numberToInt64(self) - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case int : return int64(v), nil - case int8 : return int64(v), nil - case int16 : return int64(v), nil - case int32 : return int64(v), nil - case int64 : return int64(v), nil - case uint : return int64(v), nil - case uint8 : return int64(v), nil - case uint16: return int64(v), nil - case uint32: return int64(v), nil - case uint64: return int64(v), nil - case json.Number: - if i, err := v.Int64(); err == nil { - return i, nil - } else { - return 0, err - } - default: return 0, ErrUnsupportType - } - default : return 0, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return 0, err + } + switch self.t { + case _V_NUMBER: + return numberToInt64(self) + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case int: + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return int64(v), nil + case uint: + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case uint64: + return int64(v), nil + case json.Number: + if i, err := v.Int64(); err == nil { + return i, nil + } else { + return 0, err + } + default: + return 0, ErrUnsupportType + } + default: + return 0, ErrUnsupportType + } } func castNumber(v bool) json.Number { - if v { - return json.Number("1") - } else { - return json.Number("0") - } + if v { + return json.Number("1") + } else { + return json.Number("0") + } } -// Number casts node to float64, +// Number casts node to float64, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) Number() (json.Number, error) { - if err := self.checkRaw(); err != nil { - return json.Number(""), err - } - switch self.t { - case _V_NUMBER : return toNumber(self) , nil - case types.V_STRING : - if _, err := numberToInt64(self); err == nil { - return toNumber(self), nil - } else if _, err := numberToFloat64(self); err == nil { - return toNumber(self), nil - } else { - return json.Number(""), err - } - case types.V_TRUE : return json.Number("1"), nil - case types.V_FALSE : return json.Number("0"), nil - case types.V_NULL : return json.Number("0"), nil - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case bool : return castNumber(v), nil - case int : return castNumber(v != 0), nil - case int8 : return castNumber(v != 0), nil - case int16 : return castNumber(v != 0), nil - case int32 : return castNumber(v != 0), nil - case int64 : return castNumber(v != 0), nil - case uint : return castNumber(v != 0), nil - case uint8 : return castNumber(v != 0), nil - case uint16 : return castNumber(v != 0), nil - case uint32 : return castNumber(v != 0), nil - case uint64 : return castNumber(v != 0), nil - case float32: return castNumber(v != 0), nil - case float64: return castNumber(v != 0), nil - case string : - if _, err := strconv.ParseFloat(v, 64); err == nil { - return json.Number(v), nil - } else { - return json.Number(""), err - } - case json.Number: return v, nil - default: return json.Number(""), ErrUnsupportType - } - default : return json.Number(""), ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return json.Number(""), err + } + switch self.t { + case _V_NUMBER: + return toNumber(self), nil + case types.V_STRING: + if _, err := 
numberToInt64(self); err == nil { + return toNumber(self), nil + } else if _, err := numberToFloat64(self); err == nil { + return toNumber(self), nil + } else { + return json.Number(""), err + } + case types.V_TRUE: + return json.Number("1"), nil + case types.V_FALSE: + return json.Number("0"), nil + case types.V_NULL: + return json.Number("0"), nil + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case bool: + return castNumber(v), nil + case int: + return castNumber(v != 0), nil + case int8: + return castNumber(v != 0), nil + case int16: + return castNumber(v != 0), nil + case int32: + return castNumber(v != 0), nil + case int64: + return castNumber(v != 0), nil + case uint: + return castNumber(v != 0), nil + case uint8: + return castNumber(v != 0), nil + case uint16: + return castNumber(v != 0), nil + case uint32: + return castNumber(v != 0), nil + case uint64: + return castNumber(v != 0), nil + case float32: + return castNumber(v != 0), nil + case float64: + return castNumber(v != 0), nil + case string: + if _, err := strconv.ParseFloat(v, 64); err == nil { + return json.Number(v), nil + } else { + return json.Number(""), err + } + case json.Number: + return v, nil + default: + return json.Number(""), ErrUnsupportType + } + default: + return json.Number(""), ErrUnsupportType + } } // Number exports underlying float64 value, including V_NUMBER, V_ANY of json.Number func (self *Node) StrictNumber() (json.Number, error) { - if err := self.checkRaw(); err != nil { - return json.Number(""), err - } - switch self.t { - case _V_NUMBER : return toNumber(self) , nil - case _V_ANY : - if v, ok := self.packAny().(json.Number); ok { - return v, nil - } else { - return json.Number(""), ErrUnsupportType - } - default : return json.Number(""), ErrUnsupportType - } -} - -// String cast node to string, + if err := self.checkRaw(); err != nil { + return json.Number(""), err + } + switch self.t { + case _V_NUMBER: + return toNumber(self), nil + case _V_ANY: + if v, ok := self.packAny().(json.Number); ok { + return v, nil + } else { + return json.Number(""), ErrUnsupportType + } + default: + return json.Number(""), ErrUnsupportType + } +} + +// String cast node to string, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) String() (string, error) { - if err := self.checkRaw(); err != nil { - return "", err - } - switch self.t { - case types.V_NULL : return "" , nil - case types.V_TRUE : return "true" , nil - case types.V_FALSE : return "false", nil - case types.V_STRING, _V_NUMBER : return rt.StrFrom(self.p, self.v), nil - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case bool : return strconv.FormatBool(v), nil - case int : return strconv.Itoa(v), nil - case int8 : return strconv.Itoa(int(v)), nil - case int16 : return strconv.Itoa(int(v)), nil - case int32 : return strconv.Itoa(int(v)), nil - case int64 : return strconv.Itoa(int(v)), nil - case uint : return strconv.Itoa(int(v)), nil - case uint8 : return strconv.Itoa(int(v)), nil - case uint16 : return strconv.Itoa(int(v)), nil - case uint32 : return strconv.Itoa(int(v)), nil - case uint64 : return strconv.Itoa(int(v)), nil - case float32: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil - case float64: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil - case string : return v, nil - case json.Number: return v.String(), nil - default: return "", ErrUnsupportType - } - default : return "" , ErrUnsupportType - } + if err := self.checkRaw(); 
err != nil { + return "", err + } + switch self.t { + case types.V_NULL: + return "", nil + case types.V_TRUE: + return "true", nil + case types.V_FALSE: + return "false", nil + case types.V_STRING, _V_NUMBER: + return rt.StrFrom(self.p, self.v), nil + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case bool: + return strconv.FormatBool(v), nil + case int: + return strconv.Itoa(v), nil + case int8: + return strconv.Itoa(int(v)), nil + case int16: + return strconv.Itoa(int(v)), nil + case int32: + return strconv.Itoa(int(v)), nil + case int64: + return strconv.Itoa(int(v)), nil + case uint: + return strconv.Itoa(int(v)), nil + case uint8: + return strconv.Itoa(int(v)), nil + case uint16: + return strconv.Itoa(int(v)), nil + case uint32: + return strconv.Itoa(int(v)), nil + case uint64: + return strconv.Itoa(int(v)), nil + case float32: + return strconv.FormatFloat(float64(v), 'g', -1, 64), nil + case float64: + return strconv.FormatFloat(float64(v), 'g', -1, 64), nil + case string: + return v, nil + case json.Number: + return v.String(), nil + default: + return "", ErrUnsupportType + } + default: + return "", ErrUnsupportType + } } // StrictString returns string value (unescaped), includeing V_STRING, V_ANY of string. // In other cases, it will return empty string. func (self *Node) StrictString() (string, error) { - if err := self.checkRaw(); err != nil { - return "", err - } - switch self.t { - case types.V_STRING : return rt.StrFrom(self.p, self.v), nil - case _V_ANY : - if v, ok := self.packAny().(string); ok { - return v, nil - } else { - return "", ErrUnsupportType - } - default : return "", ErrUnsupportType - } -} - -// Float64 cast node to float64, + if err := self.checkRaw(); err != nil { + return "", err + } + switch self.t { + case types.V_STRING: + return rt.StrFrom(self.p, self.v), nil + case _V_ANY: + if v, ok := self.packAny().(string); ok { + return v, nil + } else { + return "", ErrUnsupportType + } + default: + return "", ErrUnsupportType + } +} + +// Float64 cast node to float64, // including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, // V_NONE it will return error func (self *Node) Float64() (float64, error) { - if err := self.checkRaw(); err != nil { - return 0.0, err - } - switch self.t { - case _V_NUMBER, types.V_STRING : return numberToFloat64(self) - case types.V_TRUE : return 1.0, nil - case types.V_FALSE : return 0.0, nil - case types.V_NULL : return 0.0, nil - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case bool : - if v { - return 1.0, nil - } else { - return 0.0, nil - } - case int : return float64(v), nil - case int8 : return float64(v), nil - case int16 : return float64(v), nil - case int32 : return float64(v), nil - case int64 : return float64(v), nil - case uint : return float64(v), nil - case uint8 : return float64(v), nil - case uint16 : return float64(v), nil - case uint32 : return float64(v), nil - case uint64 : return float64(v), nil - case float32: return float64(v), nil - case float64: return float64(v), nil - case string : - if f, err := strconv.ParseFloat(v, 64); err == nil { - return float64(f), nil - } else { - return 0, err - } - case json.Number: - if f, err := v.Float64(); err == nil { - return float64(f), nil - } else { - return 0, err - } - default : return 0, ErrUnsupportType - } - default : return 0.0, ErrUnsupportType - } -} - -// Float64 exports underlying float64 value, includeing V_NUMBER, V_ANY + if err := self.checkRaw(); err != nil { + return 0.0, err + } + switch self.t { + case 
_V_NUMBER, types.V_STRING: + return numberToFloat64(self) + case types.V_TRUE: + return 1.0, nil + case types.V_FALSE: + return 0.0, nil + case types.V_NULL: + return 0.0, nil + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case bool: + if v { + return 1.0, nil + } else { + return 0.0, nil + } + case int: + return float64(v), nil + case int8: + return float64(v), nil + case int16: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case uint: + return float64(v), nil + case uint8: + return float64(v), nil + case uint16: + return float64(v), nil + case uint32: + return float64(v), nil + case uint64: + return float64(v), nil + case float32: + return float64(v), nil + case float64: + return float64(v), nil + case string: + if f, err := strconv.ParseFloat(v, 64); err == nil { + return float64(f), nil + } else { + return 0, err + } + case json.Number: + if f, err := v.Float64(); err == nil { + return float64(f), nil + } else { + return 0, err + } + default: + return 0, ErrUnsupportType + } + default: + return 0.0, ErrUnsupportType + } +} + +// Float64 exports underlying float64 value, includeing V_NUMBER, V_ANY func (self *Node) StrictFloat64() (float64, error) { - if err := self.checkRaw(); err != nil { - return 0.0, err - } - switch self.t { - case _V_NUMBER : return numberToFloat64(self) - case _V_ANY : - any := self.packAny() - switch v := any.(type) { - case float32 : return float64(v), nil - case float64 : return float64(v), nil - default : return 0, ErrUnsupportType - } - default : return 0.0, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return 0.0, err + } + switch self.t { + case _V_NUMBER: + return numberToFloat64(self) + case _V_ANY: + any := self.packAny() + switch v := any.(type) { + case float32: + return float64(v), nil + case float64: + return float64(v), nil + default: + return 0, ErrUnsupportType + } + default: + return 0.0, ErrUnsupportType + } } /** Sequencial Value Methods **/ @@ -511,183 +635,183 @@ func (self *Node) StrictFloat64() (float64, error) { // Len returns children count of a array|object|string node // For partially loaded node, it also works but only counts the parsed children func (self *Node) Len() (int, error) { - if err := self.checkRaw(); err != nil { - return 0, err - } - if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { - return int(self.v & _LEN_MASK), nil - } else if self.t == types.V_STRING { - return int(self.v), nil - } else if self.t == _V_NONE || self.t == types.V_NULL { - return 0, nil - } else { - return 0, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return 0, err + } + if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { + return int(self.v & _LEN_MASK), nil + } else if self.t == types.V_STRING { + return int(self.v), nil + } else if self.t == _V_NONE || self.t == types.V_NULL { + return 0, nil + } else { + return 0, ErrUnsupportType + } } func (self Node) len() int { - return int(self.v & _LEN_MASK) + return int(self.v & _LEN_MASK) } // Cap returns malloc capacity of a array|object node for children func (self *Node) Cap() (int, error) { - if err := self.checkRaw(); err != nil { - return 0, err - } - if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { - return int(self.v >> _CAP_BITS), nil - } else if self.t == _V_NONE || self.t == types.V_NULL { - 
return 0, nil - } else { - return 0, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return 0, err + } + if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { + return int(self.v >> _CAP_BITS), nil + } else if self.t == _V_NONE || self.t == types.V_NULL { + return 0, nil + } else { + return 0, ErrUnsupportType + } } func (self Node) cap() int { - return int(self.v >> _CAP_BITS) + return int(self.v >> _CAP_BITS) } // Set sets the node of given key under self, and reports if the key has existed. // // If self is V_NONE or V_NULL, it becomes V_OBJECT and sets the node at the key. func (self *Node) Set(key string, node Node) (bool, error) { - if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { - *self = NewObject([]Pair{{key, node}}) - return false, nil - } - - if err := node.Check(); err != nil { - return false, err - } - - p := self.Get(key) - if !p.Exists() { - l := self.len() - c := self.cap() - if l == c { - // TODO: maybe change append size in future - c += _DEFAULT_NODE_CAP - mem := unsafe_NewArray(_PAIR_TYPE, c) - memmove(mem, self.p, _PAIR_SIZE * uintptr(l)) - self.p = mem - } - v := self.pairAt(l) - v.Key = key - v.Value = node - self.setCapAndLen(c, l+1) - return false, nil - - } else if err := p.Check(); err != nil { - return false, err - } - - *p = node - return true, nil + if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { + *self = NewObject([]Pair{{key, node}}) + return false, nil + } + + if err := node.Check(); err != nil { + return false, err + } + + p := self.Get(key) + if !p.Exists() { + l := self.len() + c := self.cap() + if l == c { + // TODO: maybe change append size in future + c += _DEFAULT_NODE_CAP + mem := unsafe_NewArray(_PAIR_TYPE, c) + memmove(mem, self.p, _PAIR_SIZE*uintptr(l)) + self.p = mem + } + v := self.pairAt(l) + v.Key = key + v.Value = node + self.setCapAndLen(c, l+1) + return false, nil + + } else if err := p.Check(); err != nil { + return false, err + } + + *p = node + return true, nil } // SetAny wraps val with V_ANY node, and Set() the node. func (self *Node) SetAny(key string, val interface{}) (bool, error) { - return self.Set(key, NewAny(val)) + return self.Set(key, NewAny(val)) } // Unset remove the node of given key under object parent, and reports if the key has existed. func (self *Node) Unset(key string) (bool, error) { - self.must(types.V_OBJECT, "an object") - p, i := self.skipKey(key) - if !p.Exists() { - return false, nil - } else if err := p.Check(); err != nil { - return false, err - } - - self.removePair(i) - return true, nil + self.must(types.V_OBJECT, "an object") + p, i := self.skipKey(key) + if !p.Exists() { + return false, nil + } else if err := p.Check(); err != nil { + return false, err + } + + self.removePair(i) + return true, nil } // SetByIndex sets the node of given index, and reports if the key has existed. // // The index must be within self's children. 
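// For illustration, a minimal usage sketch of SetByIndex as vendored here; the
// import path is the package's canonical one and the sample values are assumptions:
//
//	import "github.com/bytedance/sonic/ast"
//
//	arr := ast.NewArray([]ast.Node{ast.NewNumber("1"), ast.NewNumber("2")})
//	existed, err := arr.SetByIndex(1, ast.NewString("two")) // replaces the node at index 1
//	// existed == true, err == nil; an out-of-range index yields (false, ErrNotExist)
//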
func (self *Node) SetByIndex(index int, node Node) (bool, error) { - if err := node.Check(); err != nil { - return false, err - } + if err := node.Check(); err != nil { + return false, err + } - p := self.Index(index) - if !p.Exists() { - return false, ErrNotExist - } else if err := p.Check(); err != nil { - return false, err - } + p := self.Index(index) + if !p.Exists() { + return false, ErrNotExist + } else if err := p.Check(); err != nil { + return false, err + } - *p = node - return true, nil + *p = node + return true, nil } // SetAny wraps val with V_ANY node, and SetByIndex() the node. func (self *Node) SetAnyByIndex(index int, val interface{}) (bool, error) { - return self.SetByIndex(index, NewAny(val)) + return self.SetByIndex(index, NewAny(val)) } // UnsetByIndex remove the node of given index func (self *Node) UnsetByIndex(index int) (bool, error) { - var p *Node - it := self.itype() - if it == types.V_ARRAY { - p = self.Index(index) - }else if it == types.V_OBJECT { - pr := self.skipIndexPair(index) - if pr == nil { - return false, ErrNotExist - } - p = &pr.Value - } else { - return false, ErrUnsupportType - } - - if !p.Exists() { - return false, ErrNotExist - } - - if it == types.V_ARRAY { - self.removeNode(index) - }else if it == types.V_OBJECT { - self.removePair(index) - } - return true, nil + var p *Node + it := self.itype() + if it == types.V_ARRAY { + p = self.Index(index) + } else if it == types.V_OBJECT { + pr := self.skipIndexPair(index) + if pr == nil { + return false, ErrNotExist + } + p = &pr.Value + } else { + return false, ErrUnsupportType + } + + if !p.Exists() { + return false, ErrNotExist + } + + if it == types.V_ARRAY { + self.removeNode(index) + } else if it == types.V_OBJECT { + self.removePair(index) + } + return true, nil } // Add appends the given node under self. // // If self is V_NONE or V_NULL, it becomes V_ARRAY and sets the node at index 0. func (self *Node) Add(node Node) error { - if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { - *self = NewArray([]Node{node}) - return nil - } + if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { + *self = NewArray([]Node{node}) + return nil + } - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return err - } - if err := self.skipAllIndex(); err != nil { - return err - } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return err + } + if err := self.skipAllIndex(); err != nil { + return err + } - var p rt.GoSlice - p.Cap = self.cap() - p.Len = self.len() - p.Ptr = self.p + var p rt.GoSlice + p.Cap = self.cap() + p.Len = self.len() + p.Ptr = self.p - s := *(*[]Node)(unsafe.Pointer(&p)) - s = append(s, node) + s := *(*[]Node)(unsafe.Pointer(&p)) + s = append(s, node) - self.p = unsafe.Pointer(&s[0]) - self.setCapAndLen(cap(s), len(s)) - return nil + self.p = unsafe.Pointer(&s[0]) + self.setCapAndLen(cap(s), len(s)) + return nil } // SetAny wraps val with V_ANY node, and Add() the node. func (self *Node) AddAny(val interface{}) error { - return self.Add(NewAny(val)) + return self.Add(NewAny(val)) } // GetByPath load given path on demands, @@ -696,1113 +820,1137 @@ func (self *Node) AddAny(val interface{}) error { // Note, the api expects the json is well-formed at least, // otherwise it may return unexpected result. 
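// For illustration, a minimal sketch of on-demand path lookup with this API; the
// sample document and variable names are assumptions:
//
//	root := ast.NewRaw(`{"user":{"scores":[90,95]}}`)
//	n := root.GetByPath("user", "scores", 1) // string keys index objects, ints index arrays
//	if err := n.Check(); err != nil {
//		// a missing path or malformed input surfaces here as an error node
//	}
//	score, _ := n.Int64() // 95
//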
func (self *Node) GetByPath(path ...interface{}) *Node { - if !self.Valid() { - return self - } - var s = self - for _, p := range path { - switch p := p.(type) { - case int: - s = s.Index(p) - if !s.Valid() { - return s - } - case string: - s = s.Get(p) - if !s.Valid() { - return s - } - default: - panic("path must be either int or string") - } - } - return s + if !self.Valid() { + return self + } + var s = self + for _, p := range path { + switch p := p.(type) { + case int: + s = s.Index(p) + if !s.Valid() { + return s + } + case string: + s = s.Get(p) + if !s.Valid() { + return s + } + default: + panic("path must be either int or string") + } + } + return s } // Get loads given key of an object node on demands func (self *Node) Get(key string) *Node { - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return unwrapError(err) - } - n, _ := self.skipKey(key) - return n + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return unwrapError(err) + } + n, _ := self.skipKey(key) + return n } // Index indexies node at given idx, // node type CAN be either V_OBJECT or V_ARRAY func (self *Node) Index(idx int) *Node { - if err := self.checkRaw(); err != nil { - return unwrapError(err) - } + if err := self.checkRaw(); err != nil { + return unwrapError(err) + } - it := self.itype() - if it == types.V_ARRAY { - return self.skipIndex(idx) + it := self.itype() + if it == types.V_ARRAY { + return self.skipIndex(idx) - }else if it == types.V_OBJECT { - pr := self.skipIndexPair(idx) - if pr == nil { - return newError(_ERR_NOT_FOUND, "value not exists") - } - return &pr.Value + } else if it == types.V_OBJECT { + pr := self.skipIndexPair(idx) + if pr == nil { + return newError(_ERR_NOT_FOUND, "value not exists") + } + return &pr.Value - } else { - return newError(_ERR_UNSUPPORT_TYPE, fmt.Sprintf("unsupported type: %v", self.itype())) - } + } else { + return newError(_ERR_UNSUPPORT_TYPE, fmt.Sprintf("unsupported type: %v", self.itype())) + } } // IndexPair indexies pair at given idx, // node type MUST be either V_OBJECT func (self *Node) IndexPair(idx int) *Pair { - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return nil - } - return self.skipIndexPair(idx) + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil + } + return self.skipIndexPair(idx) } // IndexOrGet firstly use idx to index a value and check if its key matches // If not, then use the key to search value func (self *Node) IndexOrGet(idx int, key string) *Node { - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return unwrapError(err) - } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return unwrapError(err) + } - pr := self.skipIndexPair(idx) - if pr != nil && pr.Key == key { - return &pr.Value - } - n, _ := self.skipKey(key) - return n + pr := self.skipIndexPair(idx) + if pr != nil && pr.Key == key { + return &pr.Value + } + n, _ := self.skipKey(key) + return n } /** Generic Value Converters **/ // Map loads all keys of an object node func (self *Node) Map() (map[string]interface{}, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.(map[string]interface{}); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return nil, err - } - if err := self.loadAllKey(); err != nil { - return nil, err - } - return self.toGenericObject() + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]interface{}); ok { 
+ return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObject() } // MapUseNumber loads all keys of an object node, with numeric nodes casted to json.Number func (self *Node) MapUseNumber() (map[string]interface{}, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.(map[string]interface{}); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return nil, err - } - if err := self.loadAllKey(); err != nil { - return nil, err - } - return self.toGenericObjectUseNumber() -} - -// MapUseNode scans both parsed and non-parsed chidren nodes, + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNumber() +} + +// MapUseNode scans both parsed and non-parsed chidren nodes, // and map them by their keys func (self *Node) MapUseNode() (map[string]Node, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.(map[string]Node); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return nil, err - } - if err := self.skipAllKey(); err != nil { - return nil, err - } - return self.toGenericObjectUseNode() + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]Node); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.skipAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNode() } // MapUnsafe exports the underlying pointer to its children map // WARN: don't use it unless you know what you are doing func (self *Node) UnsafeMap() ([]Pair, error) { - if err := self.should(types.V_OBJECT, "an object"); err != nil { - return nil, err - } - if err := self.skipAllKey(); err != nil { - return nil, err - } - s := rt.Ptr2SlicePtr(self.p, int(self.len()), self.cap()) - return *(*[]Pair)(s), nil + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.skipAllKey(); err != nil { + return nil, err + } + s := rt.Ptr2SlicePtr(self.p, int(self.len()), self.cap()) + return *(*[]Pair)(s), nil } // SortKeys sorts children of a V_OBJECT node in ascending key-order. // If recurse is true, it recursively sorts children's children as long as a V_OBJECT node is found. 
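// For illustration, a hedged sketch of recursive key sorting; the sample JSON and
// the exact encoded output are assumptions:
//
//	root := ast.NewRaw(`{"b":1,"a":{"d":2,"c":3}}`)
//	if err := root.SortKeys(true); err == nil {
//		buf, _ := root.MarshalJSON() // keys now ascend: {"a":{"c":3,"d":2},"b":1}
//	}
//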
func (self *Node) SortKeys(recurse bool) (err error) { - ps, err := self.UnsafeMap() - if err != nil { - return err - } - PairSlice(ps).Sort() - if recurse { - var sc Scanner - sc = func(path Sequence, node *Node) bool { - if node.itype() == types.V_OBJECT { - if err := node.SortKeys(recurse); err != nil { - return false - } - } - if node.itype() == types.V_ARRAY { - if err := node.ForEach(sc); err != nil { - return false - } - } - return true - } - self.ForEach(sc) - } - return nil + ps, err := self.UnsafeMap() + if err != nil { + return err + } + PairSlice(ps).Sort() + if recurse { + var sc Scanner + sc = func(path Sequence, node *Node) bool { + if node.itype() == types.V_OBJECT { + if err := node.SortKeys(recurse); err != nil { + return false + } + } + if node.itype() == types.V_ARRAY { + if err := node.ForEach(sc); err != nil { + return false + } + } + return true + } + self.ForEach(sc) + } + return nil } // Array loads all indexes of an array node func (self *Node) Array() ([]interface{}, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.([]interface{}); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return nil, err - } - if err := self.loadAllIndex(); err != nil { - return nil, err - } - return self.toGenericArray() + if self.isAny() { + any := self.packAny() + if v, ok := any.([]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArray() } // ArrayUseNumber loads all indexes of an array node, with numeric nodes casted to json.Number func (self *Node) ArrayUseNumber() ([]interface{}, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.([]interface{}); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return nil, err - } - if err := self.loadAllIndex(); err != nil { - return nil, err - } - return self.toGenericArrayUseNumber() -} - -// ArrayUseNode copys both parsed and non-parsed chidren nodes, + if self.isAny() { + any := self.packAny() + if v, ok := any.([]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNumber() +} + +// ArrayUseNode copys both parsed and non-parsed chidren nodes, // and indexes them by original order func (self *Node) ArrayUseNode() ([]Node, error) { - if self.isAny() { - any := self.packAny() - if v, ok := any.([]Node); ok { - return v, nil - } else { - return nil, ErrUnsupportType - } - } - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return nil, err - } - if err := self.skipAllIndex(); err != nil { - return nil, err - } - return self.toGenericArrayUseNode() + if self.isAny() { + any := self.packAny() + if v, ok := any.([]Node); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.skipAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNode() } // ArrayUnsafe exports the underlying pointer to its children array // WARN: don't use it unless you know what you 
are doing func (self *Node) UnsafeArray() ([]Node, error) { - if err := self.should(types.V_ARRAY, "an array"); err != nil { - return nil, err - } - if err := self.skipAllIndex(); err != nil { - return nil, err - } - s := rt.Ptr2SlicePtr(self.p, self.len(), self.cap()) - return *(*[]Node)(s), nil + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.skipAllIndex(); err != nil { + return nil, err + } + s := rt.Ptr2SlicePtr(self.p, self.len(), self.cap()) + return *(*[]Node)(s), nil } // Interface loads all children under all pathes from this node, // and converts itself as generic type. // WARN: all numberic nodes are casted to float64 func (self *Node) Interface() (interface{}, error) { - if err := self.checkRaw(); err != nil { - return nil, err - } - switch self.t { - case V_ERROR : return nil, self.Check() - case types.V_NULL : return nil, nil - case types.V_TRUE : return true, nil - case types.V_FALSE : return false, nil - case types.V_ARRAY : return self.toGenericArray() - case types.V_OBJECT : return self.toGenericObject() - case types.V_STRING : return rt.StrFrom(self.p, self.v), nil - case _V_NUMBER : - v, err := numberToFloat64(self) - if err != nil { - return nil, err - } - return v, nil - case _V_ARRAY_LAZY : - if err := self.loadAllIndex(); err != nil { - return nil, err - } - return self.toGenericArray() - case _V_OBJECT_LAZY : - if err := self.loadAllKey(); err != nil { - return nil, err - } - return self.toGenericObject() - case _V_ANY: - switch v := self.packAny().(type) { - case Node : return v.Interface() - case *Node: return v.Interface() - default : return v, nil - } - default : return nil, ErrUnsupportType - } + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case V_ERROR: + return nil, self.Check() + case types.V_NULL: + return nil, nil + case types.V_TRUE: + return true, nil + case types.V_FALSE: + return false, nil + case types.V_ARRAY: + return self.toGenericArray() + case types.V_OBJECT: + return self.toGenericObject() + case types.V_STRING: + return rt.StrFrom(self.p, self.v), nil + case _V_NUMBER: + v, err := numberToFloat64(self) + if err != nil { + return nil, err + } + return v, nil + case _V_ARRAY_LAZY: + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArray() + case _V_OBJECT_LAZY: + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObject() + case _V_ANY: + switch v := self.packAny().(type) { + case Node: + return v.Interface() + case *Node: + return v.Interface() + default: + return v, nil + } + default: + return nil, ErrUnsupportType + } } func (self *Node) packAny() interface{} { - return *(*interface{})(self.p) + return *(*interface{})(self.p) } // InterfaceUseNumber works same with Interface() // except numberic nodes are casted to json.Number func (self *Node) InterfaceUseNumber() (interface{}, error) { - if err := self.checkRaw(); err != nil { - return nil, err - } - switch self.t { - case V_ERROR : return nil, self.Check() - case types.V_NULL : return nil, nil - case types.V_TRUE : return true, nil - case types.V_FALSE : return false, nil - case types.V_ARRAY : return self.toGenericArrayUseNumber() - case types.V_OBJECT : return self.toGenericObjectUseNumber() - case types.V_STRING : return rt.StrFrom(self.p, self.v), nil - case _V_NUMBER : return toNumber(self), nil - case _V_ARRAY_LAZY : - if err := self.loadAllIndex(); err != nil { - return nil, err - } - return 
self.toGenericArrayUseNumber() - case _V_OBJECT_LAZY : - if err := self.loadAllKey(); err != nil { - return nil, err - } - return self.toGenericObjectUseNumber() - case _V_ANY : return self.packAny(), nil - default : return nil, ErrUnsupportType - } -} - -// InterfaceUseNode clone itself as a new node, + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case V_ERROR: + return nil, self.Check() + case types.V_NULL: + return nil, nil + case types.V_TRUE: + return true, nil + case types.V_FALSE: + return false, nil + case types.V_ARRAY: + return self.toGenericArrayUseNumber() + case types.V_OBJECT: + return self.toGenericObjectUseNumber() + case types.V_STRING: + return rt.StrFrom(self.p, self.v), nil + case _V_NUMBER: + return toNumber(self), nil + case _V_ARRAY_LAZY: + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNumber() + case _V_OBJECT_LAZY: + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNumber() + case _V_ANY: + return self.packAny(), nil + default: + return nil, ErrUnsupportType + } +} + +// InterfaceUseNode clone itself as a new node, // or its children as map[string]Node (or []Node) func (self *Node) InterfaceUseNode() (interface{}, error) { - if err := self.checkRaw(); err != nil { - return nil, err - } - switch self.t { - case types.V_ARRAY : return self.toGenericArrayUseNode() - case types.V_OBJECT : return self.toGenericObjectUseNode() - case _V_ARRAY_LAZY : - if err := self.skipAllIndex(); err != nil { - return nil, err - } - return self.toGenericArrayUseNode() - case _V_OBJECT_LAZY : - if err := self.skipAllKey(); err != nil { - return nil, err - } - return self.toGenericObjectUseNode() - default : return *self, self.Check() - } + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case types.V_ARRAY: + return self.toGenericArrayUseNode() + case types.V_OBJECT: + return self.toGenericObjectUseNode() + case _V_ARRAY_LAZY: + if err := self.skipAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNode() + case _V_OBJECT_LAZY: + if err := self.skipAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNode() + default: + return *self, self.Check() + } } // LoadAll loads all the node's children and children's children as parsed. 
// After calling it, the node can be safely used on concurrency func (self *Node) LoadAll() error { - if self.IsRaw() { - self.parseRaw(true) - return self.Check() - } - - switch self.itype() { - case types.V_ARRAY: - e := self.len() - if err := self.loadAllIndex(); err != nil { - return err - } - for i := 0; i < e; i++ { - n := self.nodeAt(i) - if n.IsRaw() { - n.parseRaw(true) - } - if err := n.Check(); err != nil { - return err - } - } - return nil - case types.V_OBJECT: - e := self.len() - if err := self.loadAllKey(); err != nil { - return err - } - for i := 0; i < e; i++ { - n := self.pairAt(i) - if n.Value.IsRaw() { - n.Value.parseRaw(true) - } - if err := n.Value.Check(); err != nil { - return err - } - } - return nil - default: - return self.Check() - } + if self.IsRaw() { + self.parseRaw(true) + return self.Check() + } + + switch self.itype() { + case types.V_ARRAY: + e := self.len() + if err := self.loadAllIndex(); err != nil { + return err + } + for i := 0; i < e; i++ { + n := self.nodeAt(i) + if n.IsRaw() { + n.parseRaw(true) + } + if err := n.Check(); err != nil { + return err + } + } + return nil + case types.V_OBJECT: + e := self.len() + if err := self.loadAllKey(); err != nil { + return err + } + for i := 0; i < e; i++ { + n := self.pairAt(i) + if n.Value.IsRaw() { + n.Value.parseRaw(true) + } + if err := n.Value.Check(); err != nil { + return err + } + } + return nil + default: + return self.Check() + } } // Load loads the node's children as parsed. // After calling it, only the node itself can be used on concurrency (not include its children) func (self *Node) Load() error { - if self.IsRaw() { - self.parseRaw(false) - return self.Load() - } + if self.IsRaw() { + self.parseRaw(false) + return self.Load() + } - switch self.t { - case _V_ARRAY_LAZY: - return self.skipAllIndex() - case _V_OBJECT_LAZY: - return self.skipAllKey() - default: - return self.Check() - } + switch self.t { + case _V_ARRAY_LAZY: + return self.skipAllIndex() + case _V_OBJECT_LAZY: + return self.skipAllKey() + default: + return self.Check() + } } /**---------------------------------- Internal Helper Methods ----------------------------------**/ var ( - _NODE_TYPE = rt.UnpackEface(Node{}).Type - _PAIR_TYPE = rt.UnpackEface(Pair{}).Type + _NODE_TYPE = rt.UnpackEface(Node{}).Type + _PAIR_TYPE = rt.UnpackEface(Pair{}).Type ) func (self *Node) setCapAndLen(cap int, len int) { - if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { - self.v = int64(len&_LEN_MASK | cap<<_CAP_BITS) - } else { - panic("value does not have a length") - } + if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { + self.v = int64(len&_LEN_MASK | cap<<_CAP_BITS) + } else { + panic("value does not have a length") + } } func (self *Node) unsafe_next() *Node { - return (*Node)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _NODE_SIZE)) + return (*Node)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _NODE_SIZE)) } func (self *Pair) unsafe_next() *Pair { - return (*Pair)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _PAIR_SIZE)) + return (*Pair)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _PAIR_SIZE)) } func (self *Node) must(t types.ValueType, s string) { - if err := self.checkRaw(); err != nil { - panic(err) - } - if err := self.Check(); err != nil { - panic(err) - } - if self.itype() != t { - panic("value cannot be represented as " + s) - } + if err := self.checkRaw(); err != nil { + panic(err) + } + if err := 
self.Check(); err != nil { + panic(err) + } + if self.itype() != t { + panic("value cannot be represented as " + s) + } } func (self *Node) should(t types.ValueType, s string) error { - if err := self.checkRaw(); err != nil { - return err - } - if self.itype() != t { - return ErrUnsupportType - } - return nil + if err := self.checkRaw(); err != nil { + return err + } + if self.itype() != t { + return ErrUnsupportType + } + return nil } func (self *Node) nodeAt(i int) *Node { - var p = self.p - if self.isLazy() { - _, stack := self.getParserAndArrayStack() - p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v)) - } - return (*Node)(unsafe.Pointer(uintptr(p) + uintptr(i)*_NODE_SIZE)) + var p = self.p + if self.isLazy() { + _, stack := self.getParserAndArrayStack() + p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v)) + } + return (*Node)(unsafe.Pointer(uintptr(p) + uintptr(i)*_NODE_SIZE)) } func (self *Node) pairAt(i int) *Pair { - var p = self.p - if self.isLazy() { - _, stack := self.getParserAndObjectStack() - p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v)) - } - return (*Pair)(unsafe.Pointer(uintptr(p) + uintptr(i)*_PAIR_SIZE)) + var p = self.p + if self.isLazy() { + _, stack := self.getParserAndObjectStack() + p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v)) + } + return (*Pair)(unsafe.Pointer(uintptr(p) + uintptr(i)*_PAIR_SIZE)) } func (self *Node) getParserAndArrayStack() (*Parser, *parseArrayStack) { - stack := (*parseArrayStack)(self.p) - ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v)) - ret.Len = self.len() - ret.Cap = self.cap() - return &stack.parser, stack + stack := (*parseArrayStack)(self.p) + ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v)) + ret.Len = self.len() + ret.Cap = self.cap() + return &stack.parser, stack } func (self *Node) getParserAndObjectStack() (*Parser, *parseObjectStack) { - stack := (*parseObjectStack)(self.p) - ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v)) - ret.Len = self.len() - ret.Cap = self.cap() - return &stack.parser, stack + stack := (*parseObjectStack)(self.p) + ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v)) + ret.Len = self.len() + ret.Cap = self.cap() + return &stack.parser, stack } func (self *Node) skipAllIndex() error { - if !self.isLazy() { - return nil - } - var err types.ParsingError - parser, stack := self.getParserAndArrayStack() - parser.skipValue = true - parser.noLazy = true - *self, err = parser.decodeArray(stack.v) - if err != 0 { - return parser.ExportError(err) - } - return nil + if !self.isLazy() { + return nil + } + var err types.ParsingError + parser, stack := self.getParserAndArrayStack() + parser.skipValue = true + parser.noLazy = true + *self, err = parser.decodeArray(stack.v) + if err != 0 { + return parser.ExportError(err) + } + return nil } func (self *Node) skipAllKey() error { - if !self.isLazy() { - return nil - } - var err types.ParsingError - parser, stack := self.getParserAndObjectStack() - parser.skipValue = true - parser.noLazy = true - *self, err = parser.decodeObject(stack.v) - if err != 0 { - return parser.ExportError(err) - } - return nil + if !self.isLazy() { + return nil + } + var err types.ParsingError + parser, stack := self.getParserAndObjectStack() + parser.skipValue = true + parser.noLazy = true + *self, err = parser.decodeObject(stack.v) + if err != 0 { + return parser.ExportError(err) + } + return nil } func (self *Node) skipKey(key string) (*Node, int) { - nb := self.len() - lazy := self.isLazy() - - if nb > 0 { - /* linear search */ - var p *Pair - if lazy { - s := (*parseObjectStack)(self.p) - p = &s.v[0] - 
} else { - p = (*Pair)(self.p) - } - - if p.Key == key { - return &p.Value, 0 - } - for i := 1; i < nb; i++ { - p = p.unsafe_next() - if p.Key == key { - return &p.Value, i - } - } - } - - /* not found */ - if !lazy { - return nil, -1 - } - - // lazy load - for last, i := self.skipNextPair(), nb; last != nil; last, i = self.skipNextPair(), i+1 { - if last.Value.Check() != nil { - return &last.Value, -1 - } - if last.Key == key { - return &last.Value, i - } - } - - return nil, -1 + nb := self.len() + lazy := self.isLazy() + + if nb > 0 { + /* linear search */ + var p *Pair + if lazy { + s := (*parseObjectStack)(self.p) + p = &s.v[0] + } else { + p = (*Pair)(self.p) + } + + if p.Key == key { + return &p.Value, 0 + } + for i := 1; i < nb; i++ { + p = p.unsafe_next() + if p.Key == key { + return &p.Value, i + } + } + } + + /* not found */ + if !lazy { + return nil, -1 + } + + // lazy load + for last, i := self.skipNextPair(), nb; last != nil; last, i = self.skipNextPair(), i+1 { + if last.Value.Check() != nil { + return &last.Value, -1 + } + if last.Key == key { + return &last.Value, i + } + } + + return nil, -1 } func (self *Node) skipIndex(index int) *Node { - nb := self.len() - if nb > index { - v := self.nodeAt(index) - return v - } - if !self.isLazy() { - return nil - } - - // lazy load - for last := self.skipNextNode(); last != nil; last = self.skipNextNode(){ - if last.Check() != nil { - return last - } - if self.len() > index { - return last - } - } - - return nil + nb := self.len() + if nb > index { + v := self.nodeAt(index) + return v + } + if !self.isLazy() { + return nil + } + + // lazy load + for last := self.skipNextNode(); last != nil; last = self.skipNextNode() { + if last.Check() != nil { + return last + } + if self.len() > index { + return last + } + } + + return nil } func (self *Node) skipIndexPair(index int) *Pair { - nb := self.len() - if nb > index { - return self.pairAt(index) - } - if !self.isLazy() { - return nil - } - - // lazy load - for last := self.skipNextPair(); last != nil; last = self.skipNextPair(){ - if last.Value.Check() != nil { - return last - } - if self.len() > index { - return last - } - } - - return nil + nb := self.len() + if nb > index { + return self.pairAt(index) + } + if !self.isLazy() { + return nil + } + + // lazy load + for last := self.skipNextPair(); last != nil; last = self.skipNextPair() { + if last.Value.Check() != nil { + return last + } + if self.len() > index { + return last + } + } + + return nil } func (self *Node) loadAllIndex() error { - if !self.isLazy() { - return nil - } - var err types.ParsingError - parser, stack := self.getParserAndArrayStack() - parser.noLazy = true - *self, err = parser.decodeArray(stack.v) - if err != 0 { - return parser.ExportError(err) - } - return nil + if !self.isLazy() { + return nil + } + var err types.ParsingError + parser, stack := self.getParserAndArrayStack() + parser.noLazy = true + *self, err = parser.decodeArray(stack.v) + if err != 0 { + return parser.ExportError(err) + } + return nil } func (self *Node) loadAllKey() error { - if !self.isLazy() { - return nil - } - var err types.ParsingError - parser, stack := self.getParserAndObjectStack() - parser.noLazy = true - *self, err = parser.decodeObject(stack.v) - if err != 0 { - return parser.ExportError(err) - } - return nil + if !self.isLazy() { + return nil + } + var err types.ParsingError + parser, stack := self.getParserAndObjectStack() + parser.noLazy = true + *self, err = parser.decodeObject(stack.v) + if err != 0 { + return 
parser.ExportError(err) + } + return nil } func (self *Node) removeNode(i int) { - nb := self.len() - 1 - node := self.nodeAt(i) - if i == nb { - self.setCapAndLen(self.cap(), nb) - *node = Node{} - return - } + nb := self.len() - 1 + node := self.nodeAt(i) + if i == nb { + self.setCapAndLen(self.cap(), nb) + *node = Node{} + return + } - from := self.nodeAt(i + 1) - memmove(unsafe.Pointer(node), unsafe.Pointer(from), _NODE_SIZE * uintptr(nb - i)) + from := self.nodeAt(i + 1) + memmove(unsafe.Pointer(node), unsafe.Pointer(from), _NODE_SIZE*uintptr(nb-i)) - last := self.nodeAt(nb) - *last = Node{} - - self.setCapAndLen(self.cap(), nb) + last := self.nodeAt(nb) + *last = Node{} + + self.setCapAndLen(self.cap(), nb) } func (self *Node) removePair(i int) { - nb := self.len() - 1 - node := self.pairAt(i) - if i == nb { - self.setCapAndLen(self.cap(), nb) - *node = Pair{} - return - } + nb := self.len() - 1 + node := self.pairAt(i) + if i == nb { + self.setCapAndLen(self.cap(), nb) + *node = Pair{} + return + } + + from := self.pairAt(i + 1) + memmove(unsafe.Pointer(node), unsafe.Pointer(from), _PAIR_SIZE*uintptr(nb-i)) - from := self.pairAt(i + 1) - memmove(unsafe.Pointer(node), unsafe.Pointer(from), _PAIR_SIZE * uintptr(nb - i)) + last := self.pairAt(nb) + *last = Pair{} - last := self.pairAt(nb) - *last = Pair{} - - self.setCapAndLen(self.cap(), nb) + self.setCapAndLen(self.cap(), nb) } func (self *Node) toGenericArray() ([]interface{}, error) { - nb := self.len() - ret := make([]interface{}, nb) - if nb == 0 { - return ret, nil - } - - /* convert each item */ - var p = (*Node)(self.p) - x, err := p.Interface() - if err != nil { - return nil, err - } - ret[0] = x - - for i := 1; i < nb; i++ { - p = p.unsafe_next() - x, err := p.Interface() - if err != nil { - return nil, err - } - ret[i] = x - } - - /* all done */ - return ret, nil + nb := self.len() + ret := make([]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Node)(self.p) + x, err := p.Interface() + if err != nil { + return nil, err + } + ret[0] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.Interface() + if err != nil { + return nil, err + } + ret[i] = x + } + + /* all done */ + return ret, nil } func (self *Node) toGenericArrayUseNumber() ([]interface{}, error) { - nb := self.len() - ret := make([]interface{}, nb) - if nb == 0 { - return ret, nil - } - - /* convert each item */ - var p = (*Node)(self.p) - x, err := p.InterfaceUseNumber() - if err != nil { - return nil, err - } - ret[0] = x - - for i := 1; i < nb; i++ { - p = p.unsafe_next() - x, err := p.InterfaceUseNumber() - if err != nil { - return nil, err - } - ret[i] = x - } - - /* all done */ - return ret, nil + nb := self.len() + ret := make([]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Node)(self.p) + x, err := p.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[0] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[i] = x + } + + /* all done */ + return ret, nil } func (self *Node) toGenericArrayUseNode() ([]Node, error) { - var nb = self.len() - var out = make([]Node, nb) - if nb == 0 { - return out, nil - } + var nb = self.len() + var out = make([]Node, nb) + if nb == 0 { + return out, nil + } - var p = (*Node)(self.p) - out[0] = *p - if err := p.Check(); err != nil { - return nil, err - } + var p = (*Node)(self.p) + out[0] = *p + if err := p.Check(); err 
!= nil { + return nil, err + } - for i := 1; i < nb; i++ { - p = p.unsafe_next() - if err := p.Check(); err != nil { - return nil, err - } - out[i] = *p - } + for i := 1; i < nb; i++ { + p = p.unsafe_next() + if err := p.Check(); err != nil { + return nil, err + } + out[i] = *p + } - return out, nil + return out, nil } func (self *Node) toGenericObject() (map[string]interface{}, error) { - nb := self.len() - ret := make(map[string]interface{}, nb) - if nb == 0 { - return ret, nil - } - - /* convert each item */ - var p = (*Pair)(self.p) - x, err := p.Value.Interface() - if err != nil { - return nil, err - } - ret[p.Key] = x - - for i := 1; i < nb; i++ { - p = p.unsafe_next() - x, err := p.Value.Interface() - if err != nil { - return nil, err - } - ret[p.Key] = x - } - - /* all done */ - return ret, nil + nb := self.len() + ret := make(map[string]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Pair)(self.p) + x, err := p.Value.Interface() + if err != nil { + return nil, err + } + ret[p.Key] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.Value.Interface() + if err != nil { + return nil, err + } + ret[p.Key] = x + } + + /* all done */ + return ret, nil } - func (self *Node) toGenericObjectUseNumber() (map[string]interface{}, error) { - nb := self.len() - ret := make(map[string]interface{}, nb) - if nb == 0 { - return ret, nil - } - - /* convert each item */ - var p = (*Pair)(self.p) - x, err := p.Value.InterfaceUseNumber() - if err != nil { - return nil, err - } - ret[p.Key] = x - - for i := 1; i < nb; i++ { - p = p.unsafe_next() - x, err := p.Value.InterfaceUseNumber() - if err != nil { - return nil, err - } - ret[p.Key] = x - } - - /* all done */ - return ret, nil + nb := self.len() + ret := make(map[string]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Pair)(self.p) + x, err := p.Value.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[p.Key] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.Value.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[p.Key] = x + } + + /* all done */ + return ret, nil } func (self *Node) toGenericObjectUseNode() (map[string]Node, error) { - var nb = self.len() - var out = make(map[string]Node, nb) - if nb == 0 { - return out, nil - } - - var p = (*Pair)(self.p) - out[p.Key] = p.Value - if err := p.Value.Check(); err != nil { - return nil, err - } - - for i := 1; i < nb; i++ { - p = p.unsafe_next() - if err := p.Value.Check(); err != nil { - return nil, err - } - out[p.Key] = p.Value - } - - /* all done */ - return out, nil + var nb = self.len() + var out = make(map[string]Node, nb) + if nb == 0 { + return out, nil + } + + var p = (*Pair)(self.p) + out[p.Key] = p.Value + if err := p.Value.Check(); err != nil { + return nil, err + } + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + if err := p.Value.Check(); err != nil { + return nil, err + } + out[p.Key] = p.Value + } + + /* all done */ + return out, nil } /**------------------------------------ Factory Methods ------------------------------------**/ var ( - nullNode = Node{t: types.V_NULL} - trueNode = Node{t: types.V_TRUE} - falseNode = Node{t: types.V_FALSE} + nullNode = Node{t: types.V_NULL} + trueNode = Node{t: types.V_TRUE} + falseNode = Node{t: types.V_FALSE} - emptyArrayNode = Node{t: types.V_ARRAY} - emptyObjectNode = Node{t: types.V_OBJECT} + emptyArrayNode = Node{t: types.V_ARRAY} + emptyObjectNode = Node{t: types.V_OBJECT} ) // 
NewRaw creates a node of raw json. // If the input json is invalid, NewRaw returns a error Node. func NewRaw(json string) Node { - parser := NewParser(json) - start, err := parser.skip() - if err != 0 { - return *newError(err, err.Message()) - } - it := switchRawType(parser.s[start]) - if it == _V_NONE { - return Node{} - } - return newRawNode(parser.s[start:parser.p], it) -} - -// NewAny creates a node of type V_ANY if any's type isn't Node or *Node, + parser := NewParser(json) + start, err := parser.skip() + if err != 0 { + return *newError(err, err.Message()) + } + it := switchRawType(parser.s[start]) + if it == _V_NONE { + return Node{} + } + return newRawNode(parser.s[start:parser.p], it) +} + +// NewAny creates a node of type V_ANY if any's type isn't Node or *Node, // which stores interface{} and can be only used for `.Interface()`\`.MarshalJSON()`. func NewAny(any interface{}) Node { - switch n := any.(type) { - case Node: - return n - case *Node: - return *n - default: - return Node{ - t: _V_ANY, - v: 0, - p: unsafe.Pointer(&any), - } - } + switch n := any.(type) { + case Node: + return n + case *Node: + return *n + default: + return Node{ + t: _V_ANY, + v: 0, + p: unsafe.Pointer(&any), + } + } } // NewBytes encodes given src with Base64 (RFC 4648), and creates a node of type V_STRING. func NewBytes(src []byte) Node { - if len(src) == 0 { - panic("empty src bytes") - } - out := encodeBase64(src) - return NewString(out) + if len(src) == 0 { + panic("empty src bytes") + } + out := encodeBase64(src) + return NewString(out) } // NewNull creates a node of type V_NULL func NewNull() Node { - return Node{ - v: 0, - p: nil, - t: types.V_NULL, - } + return Node{ + v: 0, + p: nil, + t: types.V_NULL, + } } // NewBool creates a node of type bool: -// If v is true, returns V_TRUE node -// If v is false, returns V_FALSE node +// +// If v is true, returns V_TRUE node +// If v is false, returns V_FALSE node func NewBool(v bool) Node { - var t = types.V_FALSE - if v { - t = types.V_TRUE - } - return Node{ - v: 0, - p: nil, - t: t, - } + var t = types.V_FALSE + if v { + t = types.V_TRUE + } + return Node{ + v: 0, + p: nil, + t: t, + } } // NewNumber creates a json.Number node // v must be a decimal string complying with RFC8259 func NewNumber(v string) Node { - return Node{ - v: int64(len(v) & _LEN_MASK), - p: rt.StrPtr(v), - t: _V_NUMBER, - } + return Node{ + v: int64(len(v) & _LEN_MASK), + p: rt.StrPtr(v), + t: _V_NUMBER, + } } func toNumber(node *Node) json.Number { - return json.Number(rt.StrFrom(node.p, node.v)) + return json.Number(rt.StrFrom(node.p, node.v)) } func numberToFloat64(node *Node) (float64, error) { - ret,err := toNumber(node).Float64() - if err != nil { - return 0, err - } - return ret, nil + ret, err := toNumber(node).Float64() + if err != nil { + return 0, err + } + return ret, nil } func numberToInt64(node *Node) (int64, error) { - ret,err := toNumber(node).Int64() - if err != nil { - return 0, err - } - return ret, nil + ret, err := toNumber(node).Int64() + if err != nil { + return 0, err + } + return ret, nil } func newBytes(v []byte) Node { - return Node{ - t: types.V_STRING, - p: mem2ptr(v), - v: int64(len(v) & _LEN_MASK), - } + return Node{ + t: types.V_STRING, + p: mem2ptr(v), + v: int64(len(v) & _LEN_MASK), + } } -// NewString creates a node of type V_STRING. +// NewString creates a node of type V_STRING. // v is considered to be a valid UTF-8 string, // which means it won't be validated and unescaped. // when the node is encoded to json, v will be escaped. 
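// For illustration, a brief sketch of the string factory's escaping behavior; the
// sample value is an assumption:
//
//	s := ast.NewString(`say "hi"`) // stored as-is: not validated or unescaped
//	buf, _ := s.MarshalJSON()      // escaped on encode: "say \"hi\""
//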
func NewString(v string) Node { - return Node{ - t: types.V_STRING, - p: rt.StrPtr(v), - v: int64(len(v) & _LEN_MASK), - } + return Node{ + t: types.V_STRING, + p: rt.StrPtr(v), + v: int64(len(v) & _LEN_MASK), + } } // NewArray creates a node of type V_ARRAY, // using v as its underlying children func NewArray(v []Node) Node { - return Node{ - t: types.V_ARRAY, - v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), - p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), - } + return Node{ + t: types.V_ARRAY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), + } } func (self *Node) setArray(v []Node) { - self.t = types.V_ARRAY - self.setCapAndLen(cap(v), len(v)) - self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) + self.t = types.V_ARRAY + self.setCapAndLen(cap(v), len(v)) + self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) } // NewObject creates a node of type V_OBJECT, // using v as its underlying children func NewObject(v []Pair) Node { - return Node{ - t: types.V_OBJECT, - v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), - p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), - } + return Node{ + t: types.V_OBJECT, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), + } } func (self *Node) setObject(v []Pair) { - self.t = types.V_OBJECT - self.setCapAndLen(cap(v), len(v)) - self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) + self.t = types.V_OBJECT + self.setCapAndLen(cap(v), len(v)) + self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) } type parseObjectStack struct { - parser Parser - v []Pair + parser Parser + v []Pair } type parseArrayStack struct { - parser Parser - v []Node + parser Parser + v []Node } func newLazyArray(p *Parser, v []Node) Node { - s := new(parseArrayStack) - s.parser = *p - s.v = v - return Node{ - t: _V_ARRAY_LAZY, - v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), - p: unsafe.Pointer(s), - } + s := new(parseArrayStack) + s.parser = *p + s.v = v + return Node{ + t: _V_ARRAY_LAZY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: unsafe.Pointer(s), + } } func (self *Node) setLazyArray(p *Parser, v []Node) { - s := new(parseArrayStack) - s.parser = *p - s.v = v - self.t = _V_ARRAY_LAZY - self.setCapAndLen(cap(v), len(v)) - self.p = (unsafe.Pointer)(s) + s := new(parseArrayStack) + s.parser = *p + s.v = v + self.t = _V_ARRAY_LAZY + self.setCapAndLen(cap(v), len(v)) + self.p = (unsafe.Pointer)(s) } func newLazyObject(p *Parser, v []Pair) Node { - s := new(parseObjectStack) - s.parser = *p - s.v = v - return Node{ - t: _V_OBJECT_LAZY, - v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), - p: unsafe.Pointer(s), - } + s := new(parseObjectStack) + s.parser = *p + s.v = v + return Node{ + t: _V_OBJECT_LAZY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: unsafe.Pointer(s), + } } func (self *Node) setLazyObject(p *Parser, v []Pair) { - s := new(parseObjectStack) - s.parser = *p - s.v = v - self.t = _V_OBJECT_LAZY - self.setCapAndLen(cap(v), len(v)) - self.p = (unsafe.Pointer)(s) + s := new(parseObjectStack) + s.parser = *p + s.v = v + self.t = _V_OBJECT_LAZY + self.setCapAndLen(cap(v), len(v)) + self.p = (unsafe.Pointer)(s) } func newRawNode(str string, typ types.ValueType) Node { - return Node{ - t: _V_RAW | typ, - p: rt.StrPtr(str), - v: int64(len(str) & _LEN_MASK), - } + return Node{ + t: _V_RAW | typ, + p: rt.StrPtr(str), + v: int64(len(str) & _LEN_MASK), + } } func (self *Node) parseRaw(full bool) { - raw := rt.StrFrom(self.p, self.v) - parser := NewParser(raw) - if full { - parser.noLazy = true - 
parser.skipValue = false - } - var e types.ParsingError - *self, e = parser.Parse() - if e != 0 { - *self = *newSyntaxError(parser.syntaxError(e)) - } + raw := rt.StrFrom(self.p, self.v) + parser := NewParser(raw) + if full { + parser.noLazy = true + parser.skipValue = false + } + var e types.ParsingError + *self, e = parser.Parse() + if e != 0 { + *self = *newSyntaxError(parser.syntaxError(e)) + } } func newError(err types.ParsingError, msg string) *Node { - return &Node{ - t: V_ERROR, - v: int64(err), - p: unsafe.Pointer(&msg), - } + return &Node{ + t: V_ERROR, + v: int64(err), + p: unsafe.Pointer(&msg), + } } var typeJumpTable = [256]types.ValueType{ - '"' : types.V_STRING, - '-' : _V_NUMBER, - '0' : _V_NUMBER, - '1' : _V_NUMBER, - '2' : _V_NUMBER, - '3' : _V_NUMBER, - '4' : _V_NUMBER, - '5' : _V_NUMBER, - '6' : _V_NUMBER, - '7' : _V_NUMBER, - '8' : _V_NUMBER, - '9' : _V_NUMBER, - '[' : types.V_ARRAY, - 'f' : types.V_FALSE, - 'n' : types.V_NULL, - 't' : types.V_TRUE, - '{' : types.V_OBJECT, + '"': types.V_STRING, + '-': _V_NUMBER, + '0': _V_NUMBER, + '1': _V_NUMBER, + '2': _V_NUMBER, + '3': _V_NUMBER, + '4': _V_NUMBER, + '5': _V_NUMBER, + '6': _V_NUMBER, + '7': _V_NUMBER, + '8': _V_NUMBER, + '9': _V_NUMBER, + '[': types.V_ARRAY, + 'f': types.V_FALSE, + 'n': types.V_NULL, + 't': types.V_TRUE, + '{': types.V_OBJECT, } func switchRawType(c byte) types.ValueType { - return typeJumpTable[c] + return typeJumpTable[c] } func unwrapError(err error) *Node { - if se, ok := err.(*Node); ok { - return se - }else if sse, ok := err.(Node); ok { - return &sse - } else { - msg := err.Error() - return &Node{ - t: V_ERROR, - v: 0, - p: unsafe.Pointer(&msg), - } - } -} \ No newline at end of file + if se, ok := err.(*Node); ok { + return se + } else if sse, ok := err.(Node); ok { + return &sse + } else { + msg := err.Error() + return &Node{ + t: V_ERROR, + v: 0, + p: unsafe.Pointer(&msg), + } + } +} diff --git a/vendor/github.com/bytedance/sonic/ast/parser.go b/vendor/github.com/bytedance/sonic/ast/parser.go index 0a8e7b068..66e4f47e8 100644 --- a/vendor/github.com/bytedance/sonic/ast/parser.go +++ b/vendor/github.com/bytedance/sonic/ast/parser.go @@ -17,602 +17,616 @@ package ast import ( - `fmt` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "fmt" + + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) const _DEFAULT_NODE_CAP int = 16 const ( - _ERR_NOT_FOUND types.ParsingError = 33 - _ERR_UNSUPPORT_TYPE types.ParsingError = 34 + _ERR_NOT_FOUND types.ParsingError = 33 + _ERR_UNSUPPORT_TYPE types.ParsingError = 34 ) var ( - ErrNotExist error = newError(_ERR_NOT_FOUND, "value not exists") - ErrUnsupportType error = newError(_ERR_UNSUPPORT_TYPE, "unsupported type") + ErrNotExist error = newError(_ERR_NOT_FOUND, "value not exists") + ErrUnsupportType error = newError(_ERR_UNSUPPORT_TYPE, "unsupported type") ) type Parser struct { - p int - s string - noLazy bool - skipValue bool + p int + s string + noLazy bool + skipValue bool } /** Parser Private Methods **/ func (self *Parser) delim() types.ParsingError { - n := len(self.s) - p := self.lspace(self.p) - - /* check for EOF */ - if p >= n { - return types.ERR_EOF - } - - /* check for the delimtier */ - if self.s[p] != ':' { - return types.ERR_INVALID_CHAR - } - - /* update the read pointer */ - self.p = p + 1 - return 0 + n := len(self.s) + p := self.lspace(self.p) + + /* check for EOF */ + if p >= n { + return types.ERR_EOF + } + + /* check for the delimtier */ + 
if self.s[p] != ':' { + return types.ERR_INVALID_CHAR + } + + /* update the read pointer */ + self.p = p + 1 + return 0 } func (self *Parser) object() types.ParsingError { - n := len(self.s) - p := self.lspace(self.p) - - /* check for EOF */ - if p >= n { - return types.ERR_EOF - } - - /* check for the delimtier */ - if self.s[p] != '{' { - return types.ERR_INVALID_CHAR - } - - /* update the read pointer */ - self.p = p + 1 - return 0 + n := len(self.s) + p := self.lspace(self.p) + + /* check for EOF */ + if p >= n { + return types.ERR_EOF + } + + /* check for the delimtier */ + if self.s[p] != '{' { + return types.ERR_INVALID_CHAR + } + + /* update the read pointer */ + self.p = p + 1 + return 0 } func (self *Parser) array() types.ParsingError { - n := len(self.s) - p := self.lspace(self.p) - - /* check for EOF */ - if p >= n { - return types.ERR_EOF - } - - /* check for the delimtier */ - if self.s[p] != '[' { - return types.ERR_INVALID_CHAR - } - - /* update the read pointer */ - self.p = p + 1 - return 0 + n := len(self.s) + p := self.lspace(self.p) + + /* check for EOF */ + if p >= n { + return types.ERR_EOF + } + + /* check for the delimtier */ + if self.s[p] != '[' { + return types.ERR_INVALID_CHAR + } + + /* update the read pointer */ + self.p = p + 1 + return 0 } func (self *Parser) lspace(sp int) int { - ns := len(self.s) - for ; sp<ns && isSpace(self.s[sp]); sp+=1 {} + ns := len(self.s) + for ; sp < ns && isSpace(self.s[sp]); sp += 1 { + } - return sp + return sp } func (self *Parser) decodeArray(ret []Node) (Node, types.ParsingError) { - sp := self.p - ns := len(self.s) - - /* check for EOF */ - if self.p = self.lspace(sp); self.p >= ns { - return Node{}, types.ERR_EOF - } - - /* check for empty array */ - if self.s[self.p] == ']' { - self.p++ - return emptyArrayNode, 0 - } - - /* allocate array space and parse every element */ - for { - var val Node - var err types.ParsingError - - if self.skipValue { - /* skip the value */ - var start int - if start, err = self.skipFast(); err != 0 { - return Node{}, err - } - if self.p > ns { - return Node{}, types.ERR_EOF - } - t := switchRawType(self.s[start]) - if t == _V_NONE { - return Node{}, types.ERR_INVALID_CHAR - } - val = newRawNode(self.s[start:self.p], t) - }else{ - /* decode the value */ - if val, err = self.Parse(); err != 0 { - return Node{}, err - } - } - - /* add the value to result */ - ret = append(ret, val) - self.p = self.lspace(self.p) - - /* check for EOF */ - if self.p >= ns { - return Node{}, types.ERR_EOF - } - - /* check for the next character */ - switch self.s[self.p] { - case ',' : self.p++ - case ']' : self.p++; return NewArray(ret), 0 - default: - if val.isLazy() { - return newLazyArray(self, ret), 0 - } - return Node{}, types.ERR_INVALID_CHAR - } - } + sp := self.p + ns := len(self.s) + + /* check for EOF */ + if self.p = self.lspace(sp); self.p >= ns { + return Node{}, types.ERR_EOF + } + + /* check for empty array */ + if self.s[self.p] == ']' { + self.p++ + return emptyArrayNode, 0 + } + + /* allocate array space and parse every element */ + for { + var val Node + var err types.ParsingError + + if self.skipValue { + /* skip the value */ + var start int + if start, err = self.skipFast(); err != 0 { + return Node{}, err + } + if self.p > ns { + return Node{}, types.ERR_EOF + } + t := switchRawType(self.s[start]) + if t == _V_NONE { + return Node{}, types.ERR_INVALID_CHAR + } + val = newRawNode(self.s[start:self.p], t) + } else { + /* decode the value */ + if val, err = self.Parse(); err != 0 { + return 
Node{}, err + } + } + + /* add the value to result */ + ret = append(ret, val) + self.p = self.lspace(self.p) + + /* check for EOF */ + if self.p >= ns { + return Node{}, types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case ']': + self.p++ + return NewArray(ret), 0 + default: + if val.isLazy() { + return newLazyArray(self, ret), 0 + } + return Node{}, types.ERR_INVALID_CHAR + } + } } func (self *Parser) decodeObject(ret []Pair) (Node, types.ParsingError) { - sp := self.p - ns := len(self.s) - - /* check for EOF */ - if self.p = self.lspace(sp); self.p >= ns { - return Node{}, types.ERR_EOF - } - - /* check for empty object */ - if self.s[self.p] == '}' { - self.p++ - return emptyObjectNode, 0 - } - - /* decode each pair */ - for { - var val Node - var njs types.JsonState - var err types.ParsingError - - /* decode the key */ - if njs = self.decodeValue(); njs.Vt != types.V_STRING { - return Node{}, types.ERR_INVALID_CHAR - } - - /* extract the key */ - idx := self.p - 1 - key := self.s[njs.Iv:idx] - - /* check for escape sequence */ - if njs.Ep != -1 { - if key, err = unquote(key); err != 0 { - return Node{}, err - } - } - - /* expect a ':' delimiter */ - if err = self.delim(); err != 0 { - return Node{}, err - } - - - if self.skipValue { - /* skip the value */ - var start int - if start, err = self.skipFast(); err != 0 { - return Node{}, err - } - if self.p > ns { - return Node{}, types.ERR_EOF - } - t := switchRawType(self.s[start]) - if t == _V_NONE { - return Node{}, types.ERR_INVALID_CHAR - } - val = newRawNode(self.s[start:self.p], t) - } else { - /* decode the value */ - if val, err = self.Parse(); err != 0 { - return Node{}, err - } - } - - /* add the value to result */ - ret = append(ret, Pair{Key: key, Value: val}) - self.p = self.lspace(self.p) - - /* check for EOF */ - if self.p >= ns { - return Node{}, types.ERR_EOF - } - - /* check for the next character */ - switch self.s[self.p] { - case ',' : self.p++ - case '}' : self.p++; return NewObject(ret), 0 - default: - if val.isLazy() { - return newLazyObject(self, ret), 0 - } - return Node{}, types.ERR_INVALID_CHAR - } - } + sp := self.p + ns := len(self.s) + + /* check for EOF */ + if self.p = self.lspace(sp); self.p >= ns { + return Node{}, types.ERR_EOF + } + + /* check for empty object */ + if self.s[self.p] == '}' { + self.p++ + return emptyObjectNode, 0 + } + + /* decode each pair */ + for { + var val Node + var njs types.JsonState + var err types.ParsingError + + /* decode the key */ + if njs = self.decodeValue(); njs.Vt != types.V_STRING { + return Node{}, types.ERR_INVALID_CHAR + } + + /* extract the key */ + idx := self.p - 1 + key := self.s[njs.Iv:idx] + + /* check for escape sequence */ + if njs.Ep != -1 { + if key, err = unquote(key); err != 0 { + return Node{}, err + } + } + + /* expect a ':' delimiter */ + if err = self.delim(); err != 0 { + return Node{}, err + } + + if self.skipValue { + /* skip the value */ + var start int + if start, err = self.skipFast(); err != 0 { + return Node{}, err + } + if self.p > ns { + return Node{}, types.ERR_EOF + } + t := switchRawType(self.s[start]) + if t == _V_NONE { + return Node{}, types.ERR_INVALID_CHAR + } + val = newRawNode(self.s[start:self.p], t) + } else { + /* decode the value */ + if val, err = self.Parse(); err != 0 { + return Node{}, err + } + } + + /* add the value to result */ + ret = append(ret, Pair{Key: key, Value: val}) + self.p = self.lspace(self.p) + + /* check for EOF */ + if self.p >= ns { + return 
Node{}, types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case '}': + self.p++ + return NewObject(ret), 0 + default: + if val.isLazy() { + return newLazyObject(self, ret), 0 + } + return Node{}, types.ERR_INVALID_CHAR + } + } } func (self *Parser) decodeString(iv int64, ep int) (Node, types.ParsingError) { - p := self.p - 1 - s := self.s[iv:p] - - /* fast path: no escape sequence */ - if ep == -1 { - return NewString(s), 0 - } - - /* unquote the string */ - out, err := unquote(s) - - /* check for errors */ - if err != 0 { - return Node{}, err - } else { - return newBytes(rt.Str2Mem(out)), 0 - } + p := self.p - 1 + s := self.s[iv:p] + + /* fast path: no escape sequence */ + if ep == -1 { + return NewString(s), 0 + } + + /* unquote the string */ + out, err := unquote(s) + + /* check for errors */ + if err != 0 { + return Node{}, err + } else { + return newBytes(rt.Str2Mem(out)), 0 + } } /** Parser Interface **/ func (self *Parser) Pos() int { - return self.p + return self.p } func (self *Parser) Parse() (Node, types.ParsingError) { - switch val := self.decodeValue(); val.Vt { - case types.V_EOF : return Node{}, types.ERR_EOF - case types.V_NULL : return nullNode, 0 - case types.V_TRUE : return trueNode, 0 - case types.V_FALSE : return falseNode, 0 - case types.V_STRING : return self.decodeString(val.Iv, val.Ep) - case types.V_ARRAY: - if self.noLazy { - return self.decodeArray(make([]Node, 0, _DEFAULT_NODE_CAP)) - } - return newLazyArray(self, make([]Node, 0, _DEFAULT_NODE_CAP)), 0 - case types.V_OBJECT: - if self.noLazy { - return self.decodeObject(make([]Pair, 0, _DEFAULT_NODE_CAP)) - } - return newLazyObject(self, make([]Pair, 0, _DEFAULT_NODE_CAP)), 0 - case types.V_DOUBLE : return NewNumber(self.s[val.Ep:self.p]), 0 - case types.V_INTEGER : return NewNumber(self.s[val.Ep:self.p]), 0 - default : return Node{}, types.ParsingError(-val.Vt) - } + switch val := self.decodeValue(); val.Vt { + case types.V_EOF: + return Node{}, types.ERR_EOF + case types.V_NULL: + return nullNode, 0 + case types.V_TRUE: + return trueNode, 0 + case types.V_FALSE: + return falseNode, 0 + case types.V_STRING: + return self.decodeString(val.Iv, val.Ep) + case types.V_ARRAY: + if self.noLazy { + return self.decodeArray(make([]Node, 0, _DEFAULT_NODE_CAP)) + } + return newLazyArray(self, make([]Node, 0, _DEFAULT_NODE_CAP)), 0 + case types.V_OBJECT: + if self.noLazy { + return self.decodeObject(make([]Pair, 0, _DEFAULT_NODE_CAP)) + } + return newLazyObject(self, make([]Pair, 0, _DEFAULT_NODE_CAP)), 0 + case types.V_DOUBLE: + return NewNumber(self.s[val.Ep:self.p]), 0 + case types.V_INTEGER: + return NewNumber(self.s[val.Ep:self.p]), 0 + default: + return Node{}, types.ParsingError(-val.Vt) + } } func (self *Parser) searchKey(match string) types.ParsingError { - ns := len(self.s) - if err := self.object(); err != 0 { - return err - } - - /* check for EOF */ - if self.p = self.lspace(self.p); self.p >= ns { - return types.ERR_EOF - } - - /* check for empty object */ - if self.s[self.p] == '}' { - self.p++ - return _ERR_NOT_FOUND - } - - var njs types.JsonState - var err types.ParsingError - /* decode each pair */ - for { - - /* decode the key */ - if njs = self.decodeValue(); njs.Vt != types.V_STRING { - return types.ERR_INVALID_CHAR - } - - /* extract the key */ - idx := self.p - 1 - key := self.s[njs.Iv:idx] - - /* check for escape sequence */ - if njs.Ep != -1 { - if key, err = unquote(key); err != 0 { - return err - } - } - - /* expect a ':' delimiter */ - if 
err = self.delim(); err != 0 { - return err - } - - /* skip value */ - if key != match { - if _, err = self.skipFast(); err != 0 { - return err - } - } else { - return 0 - } - - /* check for EOF */ - self.p = self.lspace(self.p) - if self.p >= ns { - return types.ERR_EOF - } - - /* check for the next character */ - switch self.s[self.p] { - case ',': - self.p++ - case '}': - self.p++ - return _ERR_NOT_FOUND - default: - return types.ERR_INVALID_CHAR - } - } + ns := len(self.s) + if err := self.object(); err != 0 { + return err + } + + /* check for EOF */ + if self.p = self.lspace(self.p); self.p >= ns { + return types.ERR_EOF + } + + /* check for empty object */ + if self.s[self.p] == '}' { + self.p++ + return _ERR_NOT_FOUND + } + + var njs types.JsonState + var err types.ParsingError + /* decode each pair */ + for { + + /* decode the key */ + if njs = self.decodeValue(); njs.Vt != types.V_STRING { + return types.ERR_INVALID_CHAR + } + + /* extract the key */ + idx := self.p - 1 + key := self.s[njs.Iv:idx] + + /* check for escape sequence */ + if njs.Ep != -1 { + if key, err = unquote(key); err != 0 { + return err + } + } + + /* expect a ':' delimiter */ + if err = self.delim(); err != 0 { + return err + } + + /* skip value */ + if key != match { + if _, err = self.skipFast(); err != 0 { + return err + } + } else { + return 0 + } + + /* check for EOF */ + self.p = self.lspace(self.p) + if self.p >= ns { + return types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case '}': + self.p++ + return _ERR_NOT_FOUND + default: + return types.ERR_INVALID_CHAR + } + } } func (self *Parser) searchIndex(idx int) types.ParsingError { - ns := len(self.s) - if err := self.array(); err != 0 { - return err - } - - /* check for EOF */ - if self.p = self.lspace(self.p); self.p >= ns { - return types.ERR_EOF - } - - /* check for empty array */ - if self.s[self.p] == ']' { - self.p++ - return _ERR_NOT_FOUND - } - - var err types.ParsingError - /* allocate array space and parse every element */ - for i := 0; i < idx; i++ { - - /* decode the value */ - if _, err = self.skipFast(); err != 0 { - return err - } - - /* check for EOF */ - self.p = self.lspace(self.p) - if self.p >= ns { - return types.ERR_EOF - } - - /* check for the next character */ - switch self.s[self.p] { - case ',': - self.p++ - case ']': - self.p++ - return _ERR_NOT_FOUND - default: - return types.ERR_INVALID_CHAR - } - } - - return 0 + ns := len(self.s) + if err := self.array(); err != 0 { + return err + } + + /* check for EOF */ + if self.p = self.lspace(self.p); self.p >= ns { + return types.ERR_EOF + } + + /* check for empty array */ + if self.s[self.p] == ']' { + self.p++ + return _ERR_NOT_FOUND + } + + var err types.ParsingError + /* allocate array space and parse every element */ + for i := 0; i < idx; i++ { + + /* decode the value */ + if _, err = self.skipFast(); err != 0 { + return err + } + + /* check for EOF */ + self.p = self.lspace(self.p) + if self.p >= ns { + return types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case ']': + self.p++ + return _ERR_NOT_FOUND + default: + return types.ERR_INVALID_CHAR + } + } + + return 0 } func (self *Node) skipNextNode() *Node { - if !self.isLazy() { - return nil - } - - parser, stack := self.getParserAndArrayStack() - ret := stack.v - sp := parser.p - ns := len(parser.s) - - /* check for EOF */ - if parser.p = parser.lspace(sp); parser.p >= ns { - return 
newSyntaxError(parser.syntaxError(types.ERR_EOF)) - } - - /* check for empty array */ - if parser.s[parser.p] == ']' { - parser.p++ - self.setArray(ret) - return nil - } - - var val Node - /* skip the value */ - if start, err := parser.skipFast(); err != 0 { - return newSyntaxError(parser.syntaxError(err)) - } else { - t := switchRawType(parser.s[start]) - if t == _V_NONE { - return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) - } - val = newRawNode(parser.s[start:parser.p], t) - } - - /* add the value to result */ - ret = append(ret, val) - parser.p = parser.lspace(parser.p) - - /* check for EOF */ - if parser.p >= ns { - return newSyntaxError(parser.syntaxError(types.ERR_EOF)) - } - - /* check for the next character */ - switch parser.s[parser.p] { - case ',': - parser.p++ - self.setLazyArray(parser, ret) - return &ret[len(ret)-1] - case ']': - parser.p++ - self.setArray(ret) - return &ret[len(ret)-1] - default: - return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) - } + if !self.isLazy() { + return nil + } + + parser, stack := self.getParserAndArrayStack() + ret := stack.v + sp := parser.p + ns := len(parser.s) + + /* check for EOF */ + if parser.p = parser.lspace(sp); parser.p >= ns { + return newSyntaxError(parser.syntaxError(types.ERR_EOF)) + } + + /* check for empty array */ + if parser.s[parser.p] == ']' { + parser.p++ + self.setArray(ret) + return nil + } + + var val Node + /* skip the value */ + if start, err := parser.skipFast(); err != 0 { + return newSyntaxError(parser.syntaxError(err)) + } else { + t := switchRawType(parser.s[start]) + if t == _V_NONE { + return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) + } + val = newRawNode(parser.s[start:parser.p], t) + } + + /* add the value to result */ + ret = append(ret, val) + parser.p = parser.lspace(parser.p) + + /* check for EOF */ + if parser.p >= ns { + return newSyntaxError(parser.syntaxError(types.ERR_EOF)) + } + + /* check for the next character */ + switch parser.s[parser.p] { + case ',': + parser.p++ + self.setLazyArray(parser, ret) + return &ret[len(ret)-1] + case ']': + parser.p++ + self.setArray(ret) + return &ret[len(ret)-1] + default: + return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) + } } -func (self *Node) skipNextPair() (*Pair) { - if !self.isLazy() { - return nil - } - - parser, stack := self.getParserAndObjectStack() - ret := stack.v - sp := parser.p - ns := len(parser.s) - - /* check for EOF */ - if parser.p = parser.lspace(sp); parser.p >= ns { - return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_EOF))} - } - - /* check for empty object */ - if parser.s[parser.p] == '}' { - parser.p++ - self.setObject(ret) - return nil - } - - /* decode one pair */ - var val Node - var njs types.JsonState - var err types.ParsingError - - /* decode the key */ - if njs = parser.decodeValue(); njs.Vt != types.V_STRING { - return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} - } - - /* extract the key */ - idx := parser.p - 1 - key := parser.s[njs.Iv:idx] - - /* check for escape sequence */ - if njs.Ep != -1 { - if key, err = unquote(key); err != 0 { - return &Pair{key, *newSyntaxError(parser.syntaxError(err))} - } - } - - /* expect a ':' delimiter */ - if err = parser.delim(); err != 0 { - return &Pair{key, *newSyntaxError(parser.syntaxError(err))} - } - - /* skip the value */ - if start, err := parser.skipFast(); err != 0 { - return &Pair{key, *newSyntaxError(parser.syntaxError(err))} - } else { - t := switchRawType(parser.s[start]) - 
if t == _V_NONE { - return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} - } - val = newRawNode(parser.s[start:parser.p], t) - } - - /* add the value to result */ - ret = append(ret, Pair{Key: key, Value: val}) - parser.p = parser.lspace(parser.p) - - /* check for EOF */ - if parser.p >= ns { - return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_EOF))} - } - - /* check for the next character */ - switch parser.s[parser.p] { - case ',': - parser.p++ - self.setLazyObject(parser, ret) - return &ret[len(ret)-1] - case '}': - parser.p++ - self.setObject(ret) - return &ret[len(ret)-1] - default: - return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} - } +func (self *Node) skipNextPair() *Pair { + if !self.isLazy() { + return nil + } + + parser, stack := self.getParserAndObjectStack() + ret := stack.v + sp := parser.p + ns := len(parser.s) + + /* check for EOF */ + if parser.p = parser.lspace(sp); parser.p >= ns { + return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_EOF))} + } + + /* check for empty object */ + if parser.s[parser.p] == '}' { + parser.p++ + self.setObject(ret) + return nil + } + + /* decode one pair */ + var val Node + var njs types.JsonState + var err types.ParsingError + + /* decode the key */ + if njs = parser.decodeValue(); njs.Vt != types.V_STRING { + return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} + } + + /* extract the key */ + idx := parser.p - 1 + key := parser.s[njs.Iv:idx] + + /* check for escape sequence */ + if njs.Ep != -1 { + if key, err = unquote(key); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } + } + + /* expect a ':' delimiter */ + if err = parser.delim(); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } + + /* skip the value */ + if start, err := parser.skipFast(); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } else { + t := switchRawType(parser.s[start]) + if t == _V_NONE { + return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} + } + val = newRawNode(parser.s[start:parser.p], t) + } + + /* add the value to result */ + ret = append(ret, Pair{Key: key, Value: val}) + parser.p = parser.lspace(parser.p) + + /* check for EOF */ + if parser.p >= ns { + return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_EOF))} + } + + /* check for the next character */ + switch parser.s[parser.p] { + case ',': + parser.p++ + self.setLazyObject(parser, ret) + return &ret[len(ret)-1] + case '}': + parser.p++ + self.setObject(ret) + return &ret[len(ret)-1] + default: + return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} + } } - /** Parser Factory **/ // Loads parse all json into interface{} func Loads(src string) (int, interface{}, error) { - ps := &Parser{s: src} - np, err := ps.Parse() - - /* check for errors */ - if err != 0 { - return 0, nil, ps.ExportError(err) - } else { - x, err := np.Interface() - if err != nil { - return 0, nil, err - } - return ps.Pos(), x, nil - } + ps := &Parser{s: src} + np, err := ps.Parse() + + /* check for errors */ + if err != 0 { + return 0, nil, ps.ExportError(err) + } else { + x, err := np.Interface() + if err != nil { + return 0, nil, err + } + return ps.Pos(), x, nil + } } // LoadsUseNumber parse all json into interface{}, with numeric nodes casted to json.Number func LoadsUseNumber(src string) (int, interface{}, error) { - ps := &Parser{s: src} - np, err := ps.Parse() - - /* check for errors 
*/ - if err != 0 { - return 0, nil, err - } else { - x, err := np.InterfaceUseNumber() - if err != nil { - return 0, nil, err - } - return ps.Pos(), x, nil - } + ps := &Parser{s: src} + np, err := ps.Parse() + + /* check for errors */ + if err != 0 { + return 0, nil, err + } else { + x, err := np.InterfaceUseNumber() + if err != nil { + return 0, nil, err + } + return ps.Pos(), x, nil + } } func NewParser(src string) *Parser { - return &Parser{s: src} + return &Parser{s: src} } // ExportError converts types.ParsingError to std Error func (self *Parser) ExportError(err types.ParsingError) error { - if err == _ERR_NOT_FOUND { - return ErrNotExist - } - return fmt.Errorf("%q", SyntaxError{ - Pos : self.p, - Src : self.s, - Code: err, - }.Description()) -} \ No newline at end of file + if err == _ERR_NOT_FOUND { + return ErrNotExist + } + return fmt.Errorf("%q", SyntaxError{ + Pos: self.p, + Src: self.s, + Code: err, + }.Description()) +} diff --git a/vendor/github.com/bytedance/sonic/ast/search.go b/vendor/github.com/bytedance/sonic/ast/search.go index bb6fceaa7..db81a183a 100644 --- a/vendor/github.com/bytedance/sonic/ast/search.go +++ b/vendor/github.com/bytedance/sonic/ast/search.go @@ -17,14 +17,14 @@ package ast type Searcher struct { - parser Parser + parser Parser } func NewSearcher(str string) *Searcher { - return &Searcher{ - parser: Parser{ - s: str, - noLazy: false, - }, - } + return &Searcher{ + parser: Parser{ + s: str, + noLazy: false, + }, + } } diff --git a/vendor/github.com/bytedance/sonic/ast/sort.go b/vendor/github.com/bytedance/sonic/ast/sort.go index 0a9f14559..003892834 100644 --- a/vendor/github.com/bytedance/sonic/ast/sort.go +++ b/vendor/github.com/bytedance/sonic/ast/sort.go @@ -19,188 +19,188 @@ package ast // Algorithm 3-way Radix Quicksort, d means the radix. // Reference: https://algs4.cs.princeton.edu/51radix/Quick3string.java.html func radixQsort(kvs PairSlice, d, maxDepth int) { - for len(kvs) > 11 { - // To avoid the worst case of quickSort (time: O(n^2)), use introsort here. - // Reference: https://en.wikipedia.org/wiki/Introsort and - // https://github.com/golang/go/issues/467 - if maxDepth == 0 { - heapSort(kvs, 0, len(kvs)) - return - } - maxDepth-- - - p := pivot(kvs, d) - lt, i, gt := 0, 0, len(kvs) - for i < gt { - c := byteAt(kvs[i].Key, d) - if c < p { - swap(kvs, lt, i) - i++ - lt++ - } else if c > p { - gt-- - swap(kvs, i, gt) - } else { - i++ - } - } - - // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)] - // Native implemention: - // radixQsort(kvs[:lt], d, maxDepth) - // if p > -1 { - // radixQsort(kvs[lt:gt], d+1, maxDepth) - // } - // radixQsort(kvs[gt:], d, maxDepth) - // Optimize as follows: make recursive calls only for the smaller parts. - // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/ - if p == -1 { - if lt > len(kvs) - gt { - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[:lt] - } else { - radixQsort(kvs[:lt], d, maxDepth) - kvs = kvs[gt:] - } - } else { - ml := maxThree(lt, gt-lt, len(kvs)-gt) - if ml == lt { - radixQsort(kvs[lt:gt], d+1, maxDepth) - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[:lt] - } else if ml == gt-lt { - radixQsort(kvs[:lt], d, maxDepth) - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[lt:gt] - d += 1 - } else { - radixQsort(kvs[:lt], d, maxDepth) - radixQsort(kvs[lt:gt], d+1, maxDepth) - kvs = kvs[gt:] - } - } - } - insertRadixSort(kvs, d) + for len(kvs) > 11 { + // To avoid the worst case of quickSort (time: O(n^2)), use introsort here. 
+ // Reference: https://en.wikipedia.org/wiki/Introsort and + // https://github.com/golang/go/issues/467 + if maxDepth == 0 { + heapSort(kvs, 0, len(kvs)) + return + } + maxDepth-- + + p := pivot(kvs, d) + lt, i, gt := 0, 0, len(kvs) + for i < gt { + c := byteAt(kvs[i].Key, d) + if c < p { + swap(kvs, lt, i) + i++ + lt++ + } else if c > p { + gt-- + swap(kvs, i, gt) + } else { + i++ + } + } + + // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)] + // Native implemention: + // radixQsort(kvs[:lt], d, maxDepth) + // if p > -1 { + // radixQsort(kvs[lt:gt], d+1, maxDepth) + // } + // radixQsort(kvs[gt:], d, maxDepth) + // Optimize as follows: make recursive calls only for the smaller parts. + // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/ + if p == -1 { + if lt > len(kvs)-gt { + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[:lt] + } else { + radixQsort(kvs[:lt], d, maxDepth) + kvs = kvs[gt:] + } + } else { + ml := maxThree(lt, gt-lt, len(kvs)-gt) + if ml == lt { + radixQsort(kvs[lt:gt], d+1, maxDepth) + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[:lt] + } else if ml == gt-lt { + radixQsort(kvs[:lt], d, maxDepth) + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[lt:gt] + d += 1 + } else { + radixQsort(kvs[:lt], d, maxDepth) + radixQsort(kvs[lt:gt], d+1, maxDepth) + kvs = kvs[gt:] + } + } + } + insertRadixSort(kvs, d) } func insertRadixSort(kvs PairSlice, d int) { - for i := 1; i < len(kvs); i++ { - for j := i; j > 0 && lessFrom(kvs[j].Key, kvs[j-1].Key, d); j-- { - swap(kvs, j, j-1) - } - } + for i := 1; i < len(kvs); i++ { + for j := i; j > 0 && lessFrom(kvs[j].Key, kvs[j-1].Key, d); j-- { + swap(kvs, j, j-1) + } + } } func pivot(kvs PairSlice, d int) int { - m := len(kvs) >> 1 - if len(kvs) > 40 { - // Tukey's ``Ninther,'' median of three mediankvs of three. - t := len(kvs) / 8 - return medianThree( - medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[t].Key, d), byteAt(kvs[2*t].Key, d)), - medianThree(byteAt(kvs[m].Key, d), byteAt(kvs[m-t].Key, d), byteAt(kvs[m+t].Key, d)), - medianThree(byteAt(kvs[len(kvs)-1].Key, d), - byteAt(kvs[len(kvs)-1-t].Key, d), - byteAt(kvs[len(kvs)-1-2*t].Key, d))) - } - return medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[m].Key, d), byteAt(kvs[len(kvs)-1].Key, d)) + m := len(kvs) >> 1 + if len(kvs) > 40 { + // Tukey's ``Ninther,'' median of three mediankvs of three. + t := len(kvs) / 8 + return medianThree( + medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[t].Key, d), byteAt(kvs[2*t].Key, d)), + medianThree(byteAt(kvs[m].Key, d), byteAt(kvs[m-t].Key, d), byteAt(kvs[m+t].Key, d)), + medianThree(byteAt(kvs[len(kvs)-1].Key, d), + byteAt(kvs[len(kvs)-1-t].Key, d), + byteAt(kvs[len(kvs)-1-2*t].Key, d))) + } + return medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[m].Key, d), byteAt(kvs[len(kvs)-1].Key, d)) } func medianThree(i, j, k int) int { - if i > j { - i, j = j, i - } // i < j - if k < i { - return i - } - if k > j { - return j - } - return k + if i > j { + i, j = j, i + } // i < j + if k < i { + return i + } + if k > j { + return j + } + return k } func maxThree(i, j, k int) int { - max := i - if max < j { - max = j - } - if max < k { - max = k - } - return max + max := i + if max < j { + max = j + } + if max < k { + max = k + } + return max } // maxDepth returns a threshold at which quicksort should switch // to heapsort. It returnkvs 2*ceil(lg(n+1)). 
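The 3-way partition above is easier to follow without the introsort fallback and the smaller-part-first tail recursion. A stripped-down sketch of the same Quick3string idea on a plain []string (quick3 and byteAtStr are hypothetical names, not the vendored PairSlice code):

    // byteAtStr returns the byte of s at position d, or -1 past the end,
    // so that shorter strings order before their extensions.
    func byteAtStr(s string, d int) int {
        if d < len(s) {
            return int(s[d])
        }
        return -1
    }

    // quick3 sorts a by the bytes from position d onward.
    func quick3(a []string, d int) {
        if len(a) < 2 {
            return
        }
        p := byteAtStr(a[len(a)/2], d)
        lt, i, gt := 0, 0, len(a)
        for i < gt {
            switch c := byteAtStr(a[i], d); {
            case c < p:
                a[lt], a[i] = a[i], a[lt]
                lt++
                i++
            case c > p:
                gt--
                a[i], a[gt] = a[gt], a[i]
            default:
                i++
            }
        }
        quick3(a[:lt], d) // keys whose byte at d is smaller than the pivot
        if p >= 0 {
            quick3(a[lt:gt], d+1) // keys equal at d: move on to the next byte
        }
        quick3(a[gt:], d) // keys whose byte at d is larger than the pivot
    }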
func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 } // siftDown implements the heap property on kvs[lo:hi]. // first is an offset into the array where the root of the heap lies. func siftDown(kvs PairSlice, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && kvs[first+child].Key < kvs[first+child+1].Key { - child++ - } - if kvs[first+root].Key >= kvs[first+child].Key { - return - } - swap(kvs, first+root, first+child) - root = child - } + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && kvs[first+child].Key < kvs[first+child+1].Key { + child++ + } + if kvs[first+root].Key >= kvs[first+child].Key { + return + } + swap(kvs, first+root, first+child) + root = child + } } func heapSort(kvs PairSlice, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with the greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(kvs, i, hi, first) - } - - // Pop elements, the largest first, into end of kvs. - for i := hi - 1; i >= 0; i-- { - swap(kvs, first, first+i) - siftDown(kvs, lo, i, first) - } + first := a + lo := 0 + hi := b - a + + // Build heap with the greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(kvs, i, hi, first) + } + + // Pop elements, the largest first, into end of kvs. + for i := hi - 1; i >= 0; i-- { + swap(kvs, first, first+i) + siftDown(kvs, lo, i, first) + } } // Note that Pair.Key is NOT pointed to Pair.m when map key is integer after swap func swap(kvs PairSlice, a, b int) { - kvs[a].Key, kvs[b].Key = kvs[b].Key, kvs[a].Key - kvs[a].Value, kvs[b].Value = kvs[b].Value, kvs[a].Value + kvs[a].Key, kvs[b].Key = kvs[b].Key, kvs[a].Key + kvs[a].Value, kvs[b].Value = kvs[b].Value, kvs[a].Value } // Compare two strings from the pos d. 
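Concretely, the "compare from the pos d" relation documented here (lessFrom, defined next) is plain lexicographic order with the first d bytes assumed equal. Illustrative calls with made-up keys:

    lessFrom("dmsg-a", "dmsg-b", 5) // true:  'a' < 'b' at position 5
    lessFrom("dmsg", "dmsg-a", 4)   // true:  a proper prefix sorts first
    lessFrom("dmsg-a", "dmsg-a", 0) // false: equal strings are not less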
func lessFrom(a, b string, d int) bool { - l := len(a) - if l > len(b) { - l = len(b) - } - for i := d; i < l; i++ { - if a[i] == b[i] { - continue - } - return a[i] < b[i] - } - return len(a) < len(b) + l := len(a) + if l > len(b) { + l = len(b) + } + for i := d; i < l; i++ { + if a[i] == b[i] { + continue + } + return a[i] < b[i] + } + return len(a) < len(b) } func byteAt(b string, p int) int { - if p < len(b) { - return int(b[p]) - } - return -1 + if p < len(b) { + return int(b[p]) + } + return -1 } diff --git a/vendor/github.com/bytedance/sonic/ast/stubs_go115.go b/vendor/github.com/bytedance/sonic/ast/stubs_go115.go index 37b9451f0..ce52a44f8 100644 --- a/vendor/github.com/bytedance/sonic/ast/stubs_go115.go +++ b/vendor/github.com/bytedance/sonic/ast/stubs_go115.go @@ -1,3 +1,4 @@ +//go:build !go1.20 // +build !go1.20 /* @@ -19,10 +20,10 @@ package ast import ( - `unsafe` - `unicode/utf8` + "unicode/utf8" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:noescape @@ -40,16 +41,16 @@ func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice //go:nosplit func mem2ptr(s []byte) unsafe.Pointer { - return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr + return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr } var ( - //go:linkname safeSet encoding/json.safeSet - safeSet [utf8.RuneSelf]bool + //go:linkname safeSet encoding/json.safeSet + safeSet [utf8.RuneSelf]bool - //go:linkname hex encoding/json.hex - hex string + //go:linkname hex encoding/json.hex + hex string ) //go:linkname unquoteBytes encoding/json.unquoteBytes -func unquoteBytes(s []byte) (t []byte, ok bool) \ No newline at end of file +func unquoteBytes(s []byte) (t []byte, ok bool) diff --git a/vendor/github.com/bytedance/sonic/ast/stubs_go120.go b/vendor/github.com/bytedance/sonic/ast/stubs_go120.go index bd6fff680..f80151627 100644 --- a/vendor/github.com/bytedance/sonic/ast/stubs_go120.go +++ b/vendor/github.com/bytedance/sonic/ast/stubs_go120.go @@ -1,3 +1,4 @@ +//go:build go1.20 // +build go1.20 /* @@ -19,10 +20,10 @@ package ast import ( - `unsafe` - `unicode/utf8` + "unicode/utf8" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:noescape @@ -40,16 +41,16 @@ func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice //go:nosplit func mem2ptr(s []byte) unsafe.Pointer { - return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr + return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr } var ( - //go:linkname safeSet encoding/json.safeSet - safeSet [utf8.RuneSelf]bool + //go:linkname safeSet encoding/json.safeSet + safeSet [utf8.RuneSelf]bool - //go:linkname hex encoding/json.hex - hex string + //go:linkname hex encoding/json.hex + hex string ) //go:linkname unquoteBytes encoding/json.unquoteBytes -func unquoteBytes(s []byte) (t []byte, ok bool) \ No newline at end of file +func unquoteBytes(s []byte) (t []byte, ok bool) diff --git a/vendor/github.com/bytedance/sonic/compat.go b/vendor/github.com/bytedance/sonic/compat.go index 015aa62bf..498158536 100644 --- a/vendor/github.com/bytedance/sonic/compat.go +++ b/vendor/github.com/bytedance/sonic/compat.go @@ -1,3 +1,4 @@ +//go:build !amd64 || go1.21 // +build !amd64 go1.21 /* @@ -19,30 +20,30 @@ package sonic import ( - `bytes` - `encoding/json` - `io` - `reflect` + "bytes" + "encoding/json" + "io" + "reflect" - `github.com/bytedance/sonic/option` + "github.com/bytedance/sonic/option" ) type frozenConfig struct { - Config + Config } // Froze convert the Config to API func (cfg Config) Froze() API 
{ - api := &frozenConfig{Config: cfg} - return api + api := &frozenConfig{Config: cfg} + return api } func (cfg frozenConfig) marshalOptions(val interface{}, prefix, indent string) ([]byte, error) { - w := bytes.NewBuffer([]byte{}) - enc := json.NewEncoder(w) - enc.SetEscapeHTML(cfg.EscapeHTML) - enc.SetIndent(prefix, indent) - err := enc.Encode(val) + w := bytes.NewBuffer([]byte{}) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(cfg.EscapeHTML) + enc.SetIndent(prefix, indent) + err := enc.Encode(val) out := w.Bytes() // json.Encoder always appends '\n' after encoding, @@ -55,68 +56,68 @@ func (cfg frozenConfig) marshalOptions(val interface{}, prefix, indent string) ( // Marshal is implemented by sonic func (cfg frozenConfig) Marshal(val interface{}) ([]byte, error) { - if !cfg.EscapeHTML { - return cfg.marshalOptions(val, "", "") - } - return json.Marshal(val) + if !cfg.EscapeHTML { + return cfg.marshalOptions(val, "", "") + } + return json.Marshal(val) } // MarshalToString is implemented by sonic func (cfg frozenConfig) MarshalToString(val interface{}) (string, error) { - out, err := cfg.Marshal(val) - return string(out), err + out, err := cfg.Marshal(val) + return string(out), err } // MarshalIndent is implemented by sonic func (cfg frozenConfig) MarshalIndent(val interface{}, prefix, indent string) ([]byte, error) { - if !cfg.EscapeHTML { - return cfg.marshalOptions(val, prefix, indent) - } - return json.MarshalIndent(val, prefix, indent) + if !cfg.EscapeHTML { + return cfg.marshalOptions(val, prefix, indent) + } + return json.MarshalIndent(val, prefix, indent) } // UnmarshalFromString is implemented by sonic func (cfg frozenConfig) UnmarshalFromString(buf string, val interface{}) error { - r := bytes.NewBufferString(buf) - dec := json.NewDecoder(r) - if cfg.UseNumber { - dec.UseNumber() - } - if cfg.DisallowUnknownFields { - dec.DisallowUnknownFields() - } - return dec.Decode(val) + r := bytes.NewBufferString(buf) + dec := json.NewDecoder(r) + if cfg.UseNumber { + dec.UseNumber() + } + if cfg.DisallowUnknownFields { + dec.DisallowUnknownFields() + } + return dec.Decode(val) } // Unmarshal is implemented by sonic func (cfg frozenConfig) Unmarshal(buf []byte, val interface{}) error { - return cfg.UnmarshalFromString(string(buf), val) + return cfg.UnmarshalFromString(string(buf), val) } // NewEncoder is implemented by sonic func (cfg frozenConfig) NewEncoder(writer io.Writer) Encoder { - enc := json.NewEncoder(writer) - if !cfg.EscapeHTML { - enc.SetEscapeHTML(cfg.EscapeHTML) - } - return enc + enc := json.NewEncoder(writer) + if !cfg.EscapeHTML { + enc.SetEscapeHTML(cfg.EscapeHTML) + } + return enc } // NewDecoder is implemented by sonic func (cfg frozenConfig) NewDecoder(reader io.Reader) Decoder { - dec := json.NewDecoder(reader) - if cfg.UseNumber { - dec.UseNumber() - } - if cfg.DisallowUnknownFields { - dec.DisallowUnknownFields() - } - return dec + dec := json.NewDecoder(reader) + if cfg.UseNumber { + dec.UseNumber() + } + if cfg.DisallowUnknownFields { + dec.DisallowUnknownFields() + } + return dec } // Valid is implemented by sonic func (cfg frozenConfig) Valid(data []byte) bool { - return json.Valid(data) + return json.Valid(data) } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in @@ -126,6 +127,5 @@ func (cfg frozenConfig) Valid(data []byte) bool { // * This is the none implement for !amd64. // It will be useful for someone who develop with !amd64 arch,like Mac M1. 
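On the builds this file targets (!amd64, or go1.21+), the frozen config is a thin shim over encoding/json. A usage sketch (the helper name is made up; the import path follows the vendor directory):

    import "github.com/bytedance/sonic"

    // marshalUnescaped exercises the EscapeHTML switch handled by marshalOptions above.
    func marshalUnescaped() (string, error) {
        api := sonic.Config{EscapeHTML: false}.Froze()
        out, err := api.Marshal(map[string]string{"tag": "<dmsg>"})
        return string(out), err // {"tag":"<dmsg>"}; '<' and '>' stay unescaped
    }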
func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { - return nil + return nil } - diff --git a/vendor/github.com/bytedance/sonic/decoder/decoder_amd64.go b/vendor/github.com/bytedance/sonic/decoder/decoder_amd64.go index 2ef19957c..1e0b3b3d4 100644 --- a/vendor/github.com/bytedance/sonic/decoder/decoder_amd64.go +++ b/vendor/github.com/bytedance/sonic/decoder/decoder_amd64.go @@ -1,3 +1,4 @@ +//go:build amd64 && go1.15 && !go1.21 // +build amd64,go1.15,!go1.21 /* @@ -14,12 +15,12 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. -*/ + */ package decoder import ( - `github.com/bytedance/sonic/internal/decoder` + "github.com/bytedance/sonic/internal/decoder" ) // Decoder is the decoder context object @@ -31,12 +32,12 @@ type MismatchTypeError = decoder.MismatchTypeError type Options = decoder.Options const ( - OptionUseInt64 Options = decoder.OptionUseInt64 - OptionUseNumber Options = decoder.OptionUseNumber - OptionUseUnicodeErrors Options = decoder.OptionUseUnicodeErrors - OptionDisableUnknown Options = decoder.OptionDisableUnknown - OptionCopyString Options = decoder.OptionCopyString - OptionValidateString Options = decoder.OptionValidateString + OptionUseInt64 Options = decoder.OptionUseInt64 + OptionUseNumber Options = decoder.OptionUseNumber + OptionUseUnicodeErrors Options = decoder.OptionUseUnicodeErrors + OptionDisableUnknown Options = decoder.OptionDisableUnknown + OptionCopyString Options = decoder.OptionCopyString + OptionValidateString Options = decoder.OptionValidateString ) // StreamDecoder is the decoder context object for streaming input. @@ -45,22 +46,22 @@ type StreamDecoder = decoder.StreamDecoder type SyntaxError = decoder.SyntaxError var ( - // NewDecoder creates a new decoder instance. - NewDecoder = decoder.NewDecoder + // NewDecoder creates a new decoder instance. + NewDecoder = decoder.NewDecoder - // NewStreamDecoder adapts to encoding/json.NewDecoder API. - // - // NewStreamDecoder returns a new decoder that reads from r. - NewStreamDecoder = decoder.NewStreamDecoder + // NewStreamDecoder adapts to encoding/json.NewDecoder API. + // + // NewStreamDecoder returns a new decoder that reads from r. + NewStreamDecoder = decoder.NewStreamDecoder - // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in - // order to reduce the first-hit latency. - // - // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is - // a compile option to set the depth of recursive compile for the nested struct type. - Pretouch = decoder.Pretouch - - // Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid. - // Otherwise, returns negative error code using start and invalid character position using end - Skip = decoder.Skip + // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in + // order to reduce the first-hit latency. + // + // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is + // a compile option to set the depth of recursive compile for the nested struct type. + Pretouch = decoder.Pretouch + + // Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid. 
+ // Otherwise, returns negative error code using start and invalid character position using end + Skip = decoder.Skip ) diff --git a/vendor/github.com/bytedance/sonic/decoder/decoder_compat.go b/vendor/github.com/bytedance/sonic/decoder/decoder_compat.go index e6b9463d7..b15ab825c 100644 --- a/vendor/github.com/bytedance/sonic/decoder/decoder_compat.go +++ b/vendor/github.com/bytedance/sonic/decoder/decoder_compat.go @@ -1,3 +1,4 @@ +//go:build !amd64 || go1.21 // +build !amd64 go1.21 /* @@ -14,144 +15,143 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. -*/ + */ package decoder import ( - `encoding/json` - `bytes` - `reflect` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/option` - `io` + "bytes" + "encoding/json" + "io" + "reflect" + + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/option" ) const ( - _F_use_int64 = iota - _F_use_number - _F_disable_urc - _F_disable_unknown - _F_copy_string - _F_validate_string - - _F_allow_control = 31 + _F_use_int64 = iota + _F_use_number + _F_disable_urc + _F_disable_unknown + _F_copy_string + _F_validate_string + + _F_allow_control = 31 ) type Options uint64 const ( - OptionUseInt64 Options = 1 << _F_use_int64 - OptionUseNumber Options = 1 << _F_use_number - OptionUseUnicodeErrors Options = 1 << _F_disable_urc - OptionDisableUnknown Options = 1 << _F_disable_unknown - OptionCopyString Options = 1 << _F_copy_string - OptionValidateString Options = 1 << _F_validate_string + OptionUseInt64 Options = 1 << _F_use_int64 + OptionUseNumber Options = 1 << _F_use_number + OptionUseUnicodeErrors Options = 1 << _F_disable_urc + OptionDisableUnknown Options = 1 << _F_disable_unknown + OptionCopyString Options = 1 << _F_copy_string + OptionValidateString Options = 1 << _F_validate_string ) func (self *Decoder) SetOptions(opts Options) { - if (opts & OptionUseNumber != 0) && (opts & OptionUseInt64 != 0) { - panic("can't set OptionUseInt64 and OptionUseNumber both!") - } - self.f = uint64(opts) + if (opts&OptionUseNumber != 0) && (opts&OptionUseInt64 != 0) { + panic("can't set OptionUseInt64 and OptionUseNumber both!") + } + self.f = uint64(opts) } - // Decoder is the decoder context object type Decoder struct { - i int - f uint64 - s string + i int + f uint64 + s string } // NewDecoder creates a new decoder instance. func NewDecoder(s string) *Decoder { - return &Decoder{s: s} + return &Decoder{s: s} } // Pos returns the current decoding position. 
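Each option above is a single bit, so flags combine with bitwise OR, and SetOptions enforces the one contradiction: OptionUseInt64 together with OptionUseNumber panics, per the guard shown above. A sketch with a made-up helper name:

    import "github.com/bytedance/sonic/decoder"

    func newStrictDecoder(src string) *decoder.Decoder {
        d := decoder.NewDecoder(src)
        d.SetOptions(decoder.OptionCopyString | decoder.OptionValidateString)
        // d.SetOptions(decoder.OptionUseInt64 | decoder.OptionUseNumber) // would panic
        return d
    }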
func (self *Decoder) Pos() int { - return self.i + return self.i } func (self *Decoder) Reset(s string) { - self.s = s - self.i = 0 - // self.f = 0 + self.s = s + self.i = 0 + // self.f = 0 } // NOTE: api fallback do nothing func (self *Decoder) CheckTrailings() error { - pos := self.i - buf := self.s - /* skip all the trailing spaces */ - if pos != len(buf) { - for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 { - pos++ - } - } + pos := self.i + buf := self.s + /* skip all the trailing spaces */ + if pos != len(buf) { + for pos < len(buf) && (types.SPACE_MASK&(1<<buf[pos])) != 0 { + pos++ + } + } - /* then it must be at EOF */ - if pos == len(buf) { - return nil - } + /* then it must be at EOF */ + if pos == len(buf) { + return nil + } - /* junk after JSON value */ - return nil + /* junk after JSON value */ + return nil } - // Decode parses the JSON-encoded data from current position and stores the result // in the value pointed to by val. func (self *Decoder) Decode(val interface{}) error { - r := bytes.NewBufferString(self.s) - dec := json.NewDecoder(r) - if (self.f | uint64(OptionUseNumber)) != 0 { - dec.UseNumber() - } - if (self.f | uint64(OptionDisableUnknown)) != 0 { - dec.DisallowUnknownFields() - } - return dec.Decode(val) + r := bytes.NewBufferString(self.s) + dec := json.NewDecoder(r) + if (self.f | uint64(OptionUseNumber)) != 0 { + dec.UseNumber() + } + if (self.f | uint64(OptionDisableUnknown)) != 0 { + dec.DisallowUnknownFields() + } + return dec.Decode(val) } // UseInt64 indicates the Decoder to unmarshal an integer into an interface{} as an // int64 instead of as a float64. func (self *Decoder) UseInt64() { - self.f |= 1 << _F_use_int64 - self.f &^= 1 << _F_use_number + self.f |= 1 << _F_use_int64 + self.f &^= 1 << _F_use_number } // UseNumber indicates the Decoder to unmarshal a number into an interface{} as a // json.Number instead of as a float64. func (self *Decoder) UseNumber() { - self.f &^= 1 << _F_use_int64 - self.f |= 1 << _F_use_number + self.f &^= 1 << _F_use_int64 + self.f |= 1 << _F_use_number } // UseUnicodeErrors indicates the Decoder to return an error when encounter invalid // UTF-8 escape sequences. func (self *Decoder) UseUnicodeErrors() { - self.f |= 1 << _F_disable_urc + self.f |= 1 << _F_disable_urc } // DisallowUnknownFields indicates the Decoder to return an error when the destination // is a struct and the input contains object keys which do not match any // non-ignored, exported fields in the destination. func (self *Decoder) DisallowUnknownFields() { - self.f |= 1 << _F_disable_unknown + self.f |= 1 << _F_disable_unknown } // CopyString indicates the Decoder to decode string values by copying instead of referring. func (self *Decoder) CopyString() { - self.f |= 1 << _F_copy_string + self.f |= 1 << _F_copy_string } -// ValidateString causes the Decoder to validate string values when decoding string value +// ValidateString causes the Decoder to validate string values when decoding string value // in JSON. Validation is that, returning error when unescaped control chars(0x00-0x1f) or // invalid UTF-8 chars in the string value of JSON. 
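The mode setters above drive the fallback Decode, which delegates to encoding/json. A minimal sketch (helper name and input are made up):

    import "github.com/bytedance/sonic/decoder"

    func decodeBigInt() (interface{}, error) {
        dec := decoder.NewDecoder(`{"n": 9007199254740993}`)
        dec.UseNumber() // keep full precision as json.Number instead of float64
        var v interface{}
        err := dec.Decode(&v)
        return v, err // v maps "n" to json.Number("9007199254740993")
    }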
func (self *Decoder) ValidateString() { - self.f |= 1 << _F_validate_string + self.f |= 1 << _F_validate_string } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in @@ -160,37 +160,36 @@ func (self *Decoder) ValidateString() { // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is // a compile option to set the depth of recursive compile for the nested struct type. func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { - return nil + return nil } type StreamDecoder struct { - r io.Reader - buf []byte - scanp int - scanned int64 - err error - Decoder + r io.Reader + buf []byte + scanp int + scanned int64 + err error + Decoder } // NewStreamDecoder adapts to encoding/json.NewDecoder API. // // NewStreamDecoder returns a new decoder that reads from r. func NewStreamDecoder(r io.Reader) *StreamDecoder { - return &StreamDecoder{r : r} + return &StreamDecoder{r: r} } -// Decode decodes input stream into val with corresponding data. +// Decode decodes input stream into val with corresponding data. // Redundantly bytes may be read and left in its buffer, and can be used at next call. -// Either io error from underlying io.Reader (except io.EOF) +// Either io error from underlying io.Reader (except io.EOF) // or syntax error from data will be recorded and stop subsequently decoding. func (self *StreamDecoder) Decode(val interface{}) (err error) { - dec := json.NewDecoder(self.r) - if (self.f | uint64(OptionUseNumber)) != 0 { - dec.UseNumber() - } - if (self.f | uint64(OptionDisableUnknown)) != 0 { - dec.DisallowUnknownFields() - } - return dec.Decode(val) + dec := json.NewDecoder(self.r) + if (self.f | uint64(OptionUseNumber)) != 0 { + dec.UseNumber() + } + if (self.f | uint64(OptionDisableUnknown)) != 0 { + dec.DisallowUnknownFields() + } + return dec.Decode(val) } - diff --git a/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go b/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go index fa107c73f..a60f6dccc 100644 --- a/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go +++ b/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go @@ -1,3 +1,4 @@ +//go:build amd64 && go1.15 && !go1.21 // +build amd64,go1.15,!go1.21 /* @@ -19,10 +20,9 @@ package encoder import ( - `github.com/bytedance/sonic/internal/encoder` + "github.com/bytedance/sonic/internal/encoder" ) - // Encoder represents a specific set of encoder configurations. type Encoder = encoder.Encoder @@ -33,76 +33,75 @@ type StreamEncoder = encoder.StreamEncoder type Options = encoder.Options const ( - // SortMapKeys indicates that the keys of a map needs to be sorted - // before serializing into JSON. - // WARNING: This hurts performance A LOT, USE WITH CARE. - SortMapKeys Options = encoder.SortMapKeys - - // EscapeHTML indicates encoder to escape all HTML characters - // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). - // WARNING: This hurts performance A LOT, USE WITH CARE. 
diff --git a/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go b/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go
index fa107c73f..a60f6dccc 100644
--- a/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go
+++ b/vendor/github.com/bytedance/sonic/encoder/encoder_amd64.go
@@ -1,3 +1,4 @@
+//go:build amd64 && go1.15 && !go1.21
 // +build amd64,go1.15,!go1.21

 /*
@@ -19,10 +20,9 @@
 package encoder

 import (
-    `github.com/bytedance/sonic/internal/encoder`
+	"github.com/bytedance/sonic/internal/encoder"
 )

-
 // Encoder represents a specific set of encoder configurations.
 type Encoder = encoder.Encoder

@@ -33,76 +33,75 @@ type StreamEncoder = encoder.StreamEncoder
 type Options = encoder.Options

 const (
-    // SortMapKeys indicates that the keys of a map needs to be sorted
-    // before serializing into JSON.
-    // WARNING: This hurts performance A LOT, USE WITH CARE.
-    SortMapKeys Options = encoder.SortMapKeys
-
-    // EscapeHTML indicates encoder to escape all HTML characters
-    // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
-    // WARNING: This hurts performance A LOT, USE WITH CARE.
-    EscapeHTML Options = encoder.EscapeHTML
-
-    // CompactMarshaler indicates that the output JSON from json.Marshaler
-    // is always compact and needs no validation
-    CompactMarshaler Options = encoder.CompactMarshaler
-
-    // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
-    // is always escaped string and needs no quoting
-    NoQuoteTextMarshaler Options = encoder.NoQuoteTextMarshaler
-
-    // NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}',
-    // instead of 'null'
-    NoNullSliceOrMap Options = encoder.NoNullSliceOrMap
-
-    // ValidateString indicates that encoder should validate the input string
-    // before encoding it into JSON.
-    ValidateString Options = encoder.ValidateString
-
-    // CompatibleWithStd is used to be compatible with std encoder.
-    CompatibleWithStd Options = encoder.CompatibleWithStd
+	// SortMapKeys indicates that the keys of a map need to be sorted
+	// before serializing into JSON.
+	// WARNING: This hurts performance A LOT, USE WITH CARE.
+	SortMapKeys Options = encoder.SortMapKeys
+
+	// EscapeHTML indicates that the encoder should escape all HTML characters
+	// after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
+	// WARNING: This hurts performance A LOT, USE WITH CARE.
+	EscapeHTML Options = encoder.EscapeHTML
+
+	// CompactMarshaler indicates that the output JSON from json.Marshaler
+	// is always compact and needs no validation
+	CompactMarshaler Options = encoder.CompactMarshaler
+
+	// NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
+	// is always an escaped string and needs no quoting
+	NoQuoteTextMarshaler Options = encoder.NoQuoteTextMarshaler
+
+	// NoNullSliceOrMap indicates that all empty Array or Object values are encoded
+	// as '[]' or '{}' instead of 'null'
+	NoNullSliceOrMap Options = encoder.NoNullSliceOrMap
+
+	// ValidateString indicates that the encoder should validate the input string
+	// before encoding it into JSON.
+	ValidateString Options = encoder.ValidateString
+
+	// CompatibleWithStd is used to be compatible with the std encoder.
+	CompatibleWithStd Options = encoder.CompatibleWithStd
 )

-
 var (
-    // Encode returns the JSON encoding of val, encoded with opts.
-    Encode = encoder.Encode
-
-    // EncodeInto is like Encode but uses a user-supplied buffer instead of allocating a new one.
-    EncodeIndented = encoder.EncodeIndented
-
-    // EncodeIndented is like Encode but applies Indent to format the output.
-    // Each JSON element in the output will begin on a new line beginning with prefix
-    // followed by one or more copies of indent according to the indentation nesting.
-    EncodeInto = encoder.EncodeInto
-
-    // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-    // characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-    // so that the JSON will be safe to embed inside HTML <script> tags.
-    // For historical reasons, web browsers don't honor standard HTML
-    // escaping within <script> tags, so an alternative JSON encoding must
-    // be used.
-    HTMLEscape = encoder.HTMLEscape
-
-    // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
-    // order to reduce the first-hit latency.
-    //
-    // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
-    // a compile option to set the depth of recursive compile for the nested struct type.
-    Pretouch = encoder.Pretouch
-
-    // Quote returns the JSON-quoted version of s.
-    Quote = encoder.Quote
-
-    // Valid validates json and returns first non-blank character position,
-    // if it is only one valid json value.
-    // Otherwise returns invalid character position using start.
-    //
-    // Note: it does not check for the invalid UTF-8 characters.
-    Valid = encoder.Valid
-
-    // NewStreamEncoder adapts to encoding/json.NewDecoder API.
-    //
-    // NewStreamEncoder returns a new encoder that write to w.
-    NewStreamEncoder = encoder.NewStreamEncoder
-)
\ No newline at end of file
+	// Encode returns the JSON encoding of val, encoded with opts.
+	Encode = encoder.Encode
+
+	// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating a new one.
+	EncodeInto = encoder.EncodeInto
+
+	// EncodeIndented is like Encode but applies Indent to format the output.
+	// Each JSON element in the output will begin on a new line beginning with prefix
+	// followed by one or more copies of indent according to the indentation nesting.
+	EncodeIndented = encoder.EncodeIndented
+
+	// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+	// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+	// so that the JSON will be safe to embed inside HTML <script> tags.
+	// For historical reasons, web browsers don't honor standard HTML
+	// escaping within <script> tags, so an alternative JSON encoding must
+	// be used.
+	HTMLEscape = encoder.HTMLEscape
+
+	// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
+	// order to reduce the first-hit latency.
+	//
+	// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
+	// a compile option to set the depth of recursive compile for the nested struct type.
+	Pretouch = encoder.Pretouch
+
+	// Quote returns the JSON-quoted version of s.
+	Quote = encoder.Quote
+
+	// Valid validates json and returns the first non-blank character position
+	// if it is only one valid json value.
+	// Otherwise it returns the invalid character position using start.
+	//
+	// Note: it does not check for invalid UTF-8 characters.
+	Valid = encoder.Valid
+
+	// NewStreamEncoder adapts to the encoding/json.NewEncoder API.
+	//
+	// NewStreamEncoder returns a new encoder that writes to w.
+	NewStreamEncoder = encoder.NewStreamEncoder
+)
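Both the amd64 build above and the compat build that follows expose the same Options bit set under the same names, so caller code is identical on either path. A short usage sketch (deterministic key order is only guaranteed because SortMapKeys is set):

package main

import (
	"fmt"

	"github.com/bytedance/sonic/encoder"
)

func main() {
	v := map[string]int{"b": 2, "a": 1}
	// Options combine as a bitmask; SortMapKeys trades speed for stable output.
	out, err := encoder.Encode(v, encoder.SortMapKeys|encoder.EscapeHTML)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2}
}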
diff --git a/vendor/github.com/bytedance/sonic/encoder/encoder_compat.go b/vendor/github.com/bytedance/sonic/encoder/encoder_compat.go
index afa80d561..47543e4f1 100644
--- a/vendor/github.com/bytedance/sonic/encoder/encoder_compat.go
+++ b/vendor/github.com/bytedance/sonic/encoder/encoder_compat.go
@@ -1,3 +1,4 @@
+//go:build !amd64 || go1.21
 // +build !amd64 go1.21

 /*
@@ -14,158 +15,158 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */

 package encoder

 import (
-    `io`
-    `bytes`
-    `encoding/json`
-    `reflect`
+	"bytes"
+	"encoding/json"
+	"io"
+	"reflect"

-    `github.com/bytedance/sonic/option`
+	"github.com/bytedance/sonic/option"
 )

 // Options is a set of encoding options.
 type Options uint64

 const (
-    bitSortMapKeys = iota
-    bitEscapeHTML
-    bitCompactMarshaler
-    bitNoQuoteTextMarshaler
-    bitNoNullSliceOrMap
-    bitValidateString
-
-    // used for recursive compile
-    bitPointerValue = 63
+	bitSortMapKeys = iota
+	bitEscapeHTML
+	bitCompactMarshaler
+	bitNoQuoteTextMarshaler
+	bitNoNullSliceOrMap
+	bitValidateString
+
+	// used for recursive compile
+	bitPointerValue = 63
 )

 const (
-    // SortMapKeys indicates that the keys of a map needs to be sorted
-    // before serializing into JSON.
-    // WARNING: This hurts performance A LOT, USE WITH CARE.
-    SortMapKeys Options = 1 << bitSortMapKeys
-
-    // EscapeHTML indicates encoder to escape all HTML characters
-    // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
-    // WARNING: This hurts performance A LOT, USE WITH CARE.
-    EscapeHTML Options = 1 << bitEscapeHTML
-
-    // CompactMarshaler indicates that the output JSON from json.Marshaler
-    // is always compact and needs no validation
-    CompactMarshaler Options = 1 << bitCompactMarshaler
-
-    // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
-    // is always escaped string and needs no quoting
-    NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler
-
-    // NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}',
-    // instead of 'null'
-    NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap
-
-    // ValidateString indicates that encoder should validate the input string
-    // before encoding it into JSON.
-    ValidateString Options = 1 << bitValidateString
-
-    // CompatibleWithStd is used to be compatible with std encoder.
-    CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler
+	// SortMapKeys indicates that the keys of a map need to be sorted
+	// before serializing into JSON.
+	// WARNING: This hurts performance A LOT, USE WITH CARE.
+	SortMapKeys Options = 1 << bitSortMapKeys
+
+	// EscapeHTML indicates that the encoder should escape all HTML characters
+	// after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
+	// WARNING: This hurts performance A LOT, USE WITH CARE.
+	EscapeHTML Options = 1 << bitEscapeHTML
+
+	// CompactMarshaler indicates that the output JSON from json.Marshaler
+	// is always compact and needs no validation
+	CompactMarshaler Options = 1 << bitCompactMarshaler
+
+	// NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
+	// is always an escaped string and needs no quoting
+	NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler
+
+	// NoNullSliceOrMap indicates that all empty Array or Object values are encoded
+	// as '[]' or '{}' instead of 'null'
+	NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap
+
+	// ValidateString indicates that the encoder should validate the input string
+	// before encoding it into JSON.
+	ValidateString Options = 1 << bitValidateString
+
+	// CompatibleWithStd is used to be compatible with the std encoder.
+	CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler
 )

 // Encoder represents a specific set of encoder configurations.
 type Encoder struct {
-    Opts Options
-    prefix string
-    indent string
+	Opts   Options
+	prefix string
+	indent string
 }

 // Encode returns the JSON encoding of v.
 func (self *Encoder) Encode(v interface{}) ([]byte, error) {
-    if self.indent != "" || self.prefix != "" {
-        return EncodeIndented(v, self.prefix, self.indent, self.Opts)
-    }
-    return Encode(v, self.Opts)
+	if self.indent != "" || self.prefix != "" {
+		return EncodeIndented(v, self.prefix, self.indent, self.Opts)
+	}
+	return Encode(v, self.Opts)
 }

 // SortKeys enables the SortMapKeys option.
 func (self *Encoder) SortKeys() *Encoder {
-    self.Opts |= SortMapKeys
-    return self
+	self.Opts |= SortMapKeys
+	return self
 }

 // SetEscapeHTML specifies whether the EscapeHTML option is enabled.
 func (self *Encoder) SetEscapeHTML(f bool) {
-    if f {
-        self.Opts |= EscapeHTML
-    } else {
-        self.Opts &= ^EscapeHTML
-    }
+	if f {
+		self.Opts |= EscapeHTML
+	} else {
+		self.Opts &= ^EscapeHTML
+	}
 }

 // SetValidateString specifies whether the ValidateString option is enabled.
 func (self *Encoder) SetValidateString(f bool) {
-    if f {
-        self.Opts |= ValidateString
-    } else {
-        self.Opts &= ^ValidateString
-    }
+	if f {
+		self.Opts |= ValidateString
+	} else {
+		self.Opts &= ^ValidateString
+	}
 }

 // SetCompactMarshaler specifies whether the CompactMarshaler option is enabled.
 func (self *Encoder) SetCompactMarshaler(f bool) {
-    if f {
-        self.Opts |= CompactMarshaler
-    } else {
-        self.Opts &= ^CompactMarshaler
-    }
+	if f {
+		self.Opts |= CompactMarshaler
+	} else {
+		self.Opts &= ^CompactMarshaler
+	}
 }

 // SetNoQuoteTextMarshaler specifies whether the NoQuoteTextMarshaler option is enabled.
 func (self *Encoder) SetNoQuoteTextMarshaler(f bool) {
-    if f {
-        self.Opts |= NoQuoteTextMarshaler
-    } else {
-        self.Opts &= ^NoQuoteTextMarshaler
-    }
+	if f {
+		self.Opts |= NoQuoteTextMarshaler
+	} else {
+		self.Opts &= ^NoQuoteTextMarshaler
+	}
 }

 // SetIndent instructs the encoder to format each subsequent encoded
 // value as if indented by the package-level function EncodeIndented().
 // Calling SetIndent("", "") disables indentation.
 func (enc *Encoder) SetIndent(prefix, indent string) {
-    enc.prefix = prefix
-    enc.indent = indent
+	enc.prefix = prefix
+	enc.indent = indent
 }

 // Quote returns the JSON-quoted version of s.
 func Quote(s string) string {
-    /* check for empty string */
-    if s == "" {
-        return `""`
-    }
+	/* check for empty string */
+	if s == "" {
+		return `""`
+	}

-    out, _ := json.Marshal(s)
-    return string(out)
+	out, _ := json.Marshal(s)
+	return string(out)
 }

 // Encode returns the JSON encoding of val, encoded with opts.
 func Encode(val interface{}, opts Options) ([]byte, error) {
-    return json.Marshal(val)
+	return json.Marshal(val)
 }

 // EncodeInto is like Encode but uses a user-supplied buffer instead of allocating
 // a new one.
 func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
-    if buf == nil {
-        panic("user-supplied buffer buf is nil")
-    }
-    w := bytes.NewBuffer(*buf)
-    enc := json.NewEncoder(w)
-    enc.SetEscapeHTML((opts & EscapeHTML) != 0)
-    err := enc.Encode(val)
-    *buf = w.Bytes()
-    return err
+	if buf == nil {
+		panic("user-supplied buffer buf is nil")
+	}
+	w := bytes.NewBuffer(*buf)
+	enc := json.NewEncoder(w)
+	enc.SetEscapeHTML((opts & EscapeHTML) != 0)
+	err := enc.Encode(val)
+	*buf = w.Bytes()
+	return err
 }
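EncodeInto, shown above, appends into a caller-owned buffer rather than allocating a fresh one. A small usage sketch against that signature (note that on this compat path the underlying json.Encoder also appends a trailing newline):

package main

import (
	"fmt"

	"github.com/bytedance/sonic/encoder"
)

func main() {
	// Reuse one backing buffer across encodes to cut allocations.
	buf := make([]byte, 0, 1024)
	if err := encoder.EncodeInto(&buf, map[string]bool{"ok": true}, 0); err != nil {
		panic(err)
	}
	fmt.Print(string(buf)) // {"ok":true}
}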
 // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
@@ -175,22 +176,22 @@ func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
 // escaping within <script> tags, so an alternative JSON encoding must
 // be used.
 func HTMLEscape(dst []byte, src []byte) []byte {
-    d := bytes.NewBuffer(dst)
-    json.HTMLEscape(d, src)
-    return d.Bytes()
+	d := bytes.NewBuffer(dst)
+	json.HTMLEscape(d, src)
+	return d.Bytes()
 }

 // EncodeIndented is like Encode but applies Indent to format the output.
 // Each JSON element in the output will begin on a new line beginning with prefix
 // followed by one or more copies of indent according to the indentation nesting.
 func EncodeIndented(val interface{}, prefix string, indent string, opts Options) ([]byte, error) {
-    w := bytes.NewBuffer([]byte{})
-    enc := json.NewEncoder(w)
-    enc.SetEscapeHTML((opts & EscapeHTML) != 0)
-    enc.SetIndent(prefix, indent)
-    err := enc.Encode(val)
-    out := w.Bytes()
-    return out, err
+	w := bytes.NewBuffer([]byte{})
+	enc := json.NewEncoder(w)
+	enc.SetEscapeHTML((opts & EscapeHTML) != 0)
+	enc.SetIndent(prefix, indent)
+	err := enc.Encode(val)
+	out := w.Bytes()
+	return out, err
 }

 // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
@@ -199,7 +200,7 @@ func EncodeIndented(val interface{}, prefix string, indent string, opts Options)
 // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
 // a compile option to set the depth of recursive compile for the nested struct type.
 func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
-    return nil
+	return nil
 }

 // Valid validates json and returns the first non-blank character position,
@@ -208,27 +209,27 @@ func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
 //
 // Note: it does not check for invalid UTF-8 characters.
 func Valid(data []byte) (ok bool, start int) {
-    return json.Valid(data), 0
+	return json.Valid(data), 0
 }

-// StreamEncoder uses io.Writer as
+// StreamEncoder uses io.Writer as its output.
 type StreamEncoder struct {
-    w io.Writer
-    Encoder
+	w io.Writer
+	Encoder
 }

 // NewStreamEncoder adapts to the encoding/json.NewEncoder API.
 //
 // NewStreamEncoder returns a new encoder that writes to w.
 func NewStreamEncoder(w io.Writer) *StreamEncoder {
-    return &StreamEncoder{w: w}
+	return &StreamEncoder{w: w}
 }

 // Encode encodes interface{} as JSON to io.Writer
 func (enc *StreamEncoder) Encode(val interface{}) (err error) {
-    jenc := json.NewEncoder(enc.w)
-    jenc.SetEscapeHTML((enc.Opts & EscapeHTML) != 0)
-    jenc.SetIndent(enc.prefix, enc.indent)
-    err = jenc.Encode(val)
-    return err
+	jenc := json.NewEncoder(enc.w)
+	jenc.SetEscapeHTML((enc.Opts & EscapeHTML) != 0)
+	jenc.SetIndent(enc.prefix, enc.indent)
+	err = jenc.Encode(val)
+	return err
 }
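To round off the compat file: StreamEncoder embeds Encoder by value, so the option setters above are promoted to it. A sketch of typical stream use (the writer choice is arbitrary):

package main

import (
	"os"

	"github.com/bytedance/sonic/encoder"
)

func main() {
	enc := encoder.NewStreamEncoder(os.Stdout)
	enc.SetIndent("", "  ") // promoted from the embedded Encoder
	_ = enc.Encode(map[string]int{"n": 1})
}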
diff --git a/vendor/github.com/bytedance/sonic/internal/caching/fcache.go b/vendor/github.com/bytedance/sonic/internal/caching/fcache.go
index 8cf62ff44..a0ce05e24 100644
--- a/vendor/github.com/bytedance/sonic/internal/caching/fcache.go
+++ b/vendor/github.com/bytedance/sonic/internal/caching/fcache.go
@@ -17,99 +17,99 @@ package caching

 import (
-    `strings`
-    `unsafe`
+	"strings"
+	"unsafe"

-    `github.com/bytedance/sonic/internal/rt`
+	"github.com/bytedance/sonic/internal/rt"
 )

 type FieldMap struct {
-    N uint64
-    b unsafe.Pointer
-    m map[string]int
+	N uint64
+	b unsafe.Pointer
+	m map[string]int
 }

 type FieldEntry struct {
-    ID   int
-    Name string
-    Hash uint64
+	ID   int
+	Name string
+	Hash uint64
 }

 const (
-    FieldMap_N = int64(unsafe.Offsetof(FieldMap{}.N))
-    FieldMap_b = int64(unsafe.Offsetof(FieldMap{}.b))
+	FieldMap_N = int64(unsafe.Offsetof(FieldMap{}.N))
+	FieldMap_b = int64(unsafe.Offsetof(FieldMap{}.b))

     FieldEntrySize = int64(unsafe.Sizeof(FieldEntry{}))
 )

 func newBucket(n int) unsafe.Pointer {
-    v := make([]FieldEntry, n)
-    return (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr
+	v := make([]FieldEntry, n)
+	return (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr
 }

 func CreateFieldMap(n int) *FieldMap {
-    return &FieldMap {
-        N: uint64(n * 2),
-        b: newBucket(n * 2), // LoadFactor = 0.5
-        m: make(map[string]int, n * 2),
-    }
+	return &FieldMap{
+		N: uint64(n * 2),
+		b: newBucket(n * 2), // LoadFactor = 0.5
+		m: make(map[string]int, n*2),
+	}
 }

 func (self *FieldMap) At(p uint64) *FieldEntry {
-    off := uintptr(p) * uintptr(FieldEntrySize)
-    return (*FieldEntry)(unsafe.Pointer(uintptr(self.b) + off))
+	off := uintptr(p) * uintptr(FieldEntrySize)
+	return (*FieldEntry)(unsafe.Pointer(uintptr(self.b) + off))
 }

 // Get searches FieldMap by name. JIT generated assembly does NOT call this
 // function, rather it implements its own version directly in assembly. So
 // we must ensure this function stays in sync with the JIT generated one.
 func (self *FieldMap) Get(name string) int {
-    h := StrHash(name)
-    p := h % self.N
-    s := self.At(p)
-
-    /* find the element;
-     * the hash map is never full, so the loop will always terminate */
-    for s.Hash != 0 {
-        if s.Hash == h && s.Name == name {
-            return s.ID
-        } else {
-            p = (p + 1) % self.N
-            s = self.At(p)
-        }
-    }
-
-    /* not found */
-    return -1
+	h := StrHash(name)
+	p := h % self.N
+	s := self.At(p)
+
+	/* find the element;
+	 * the hash map is never full, so the loop will always terminate */
+	for s.Hash != 0 {
+		if s.Hash == h && s.Name == name {
+			return s.ID
+		} else {
+			p = (p + 1) % self.N
+			s = self.At(p)
+		}
+	}
+
+	/* not found */
+	return -1
 }

 func (self *FieldMap) Set(name string, i int) {
-    h := StrHash(name)
-    p := h % self.N
-    s := self.At(p)
-
-    /* searching for an empty slot;
-     * the hash map is never full, so the loop will always terminate */
-    for s.Hash != 0 {
-        p = (p + 1) % self.N
-        s = self.At(p)
-    }
-
-    /* set the value */
-    s.ID = i
-    s.Hash = h
-    s.Name = name
-
-    /* add the case-insensitive version, prefer the one with smaller field ID */
-    key := strings.ToLower(name)
-    if v, ok := self.m[key]; !ok || i < v {
-        self.m[key] = i
-    }
+	h := StrHash(name)
+	p := h % self.N
+	s := self.At(p)
+
+	/* searching for an empty slot;
+	 * the hash map is never full, so the loop will always terminate */
+	for s.Hash != 0 {
+		p = (p + 1) % self.N
+		s = self.At(p)
+	}
+
+	/* set the value */
+	s.ID = i
+	s.Hash = h
+	s.Name = name
+
+	/* add the case-insensitive version, prefer the one with smaller field ID */
+	key := strings.ToLower(name)
+	if v, ok := self.m[key]; !ok || i < v {
+		self.m[key] = i
+	}
 }

 func (self *FieldMap) GetCaseInsensitive(name string) int {
-    if i, ok := self.m[strings.ToLower(name)]; ok {
-        return i
-    } else {
-        return -1
-    }
+	if i, ok := self.m[strings.ToLower(name)]; ok {
+		return i
+	} else {
+		return -1
+	}
 }
diff --git a/vendor/github.com/bytedance/sonic/internal/caching/hashing.go b/vendor/github.com/bytedance/sonic/internal/caching/hashing.go
index b8876a410..cfa2b7761 100644
--- a/vendor/github.com/bytedance/sonic/internal/caching/hashing.go
+++ b/vendor/github.com/bytedance/sonic/internal/caching/hashing.go
@@ -17,14 +17,14 @@ package caching

 import (
-    `unsafe`
+	"unsafe"

-    `github.com/bytedance/sonic/internal/rt`
+	"github.com/bytedance/sonic/internal/rt"
 )

 var (
-    V_strhash = rt.UnpackEface(strhash)
-    S_strhash = *(*uintptr)(V_strhash.Value)
+	V_strhash = rt.UnpackEface(strhash)
+	S_strhash = *(*uintptr)(V_strhash.Value)
 )

 //go:noescape
@@ -32,9 +32,9 @@ var (
 func strhash(_ unsafe.Pointer, _ uintptr) uintptr

 func StrHash(s string) uint64 {
-    if v := strhash(unsafe.Pointer(&s), 0); v == 0 {
-        return 1
-    } else {
-        return uint64(v)
-    }
+	if v := strhash(unsafe.Pointer(&s), 0); v == 0 {
+		return 1
+	} else {
+		return uint64(v)
+	}
 }
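FieldMap.Get and Set above implement open addressing at load factor 0.5, so a probe chain always reaches an empty slot and both loops terminate; StrHash maps the unlikely hash value 0 to 1 precisely so that 0 can mark an empty slot. A standalone sketch of the same probing logic in plain Go, without unsafe (the hash value is a stand-in for StrHash):

package main

import "fmt"

type entry struct {
	hash uint64
	name string
	id   int
}

// lookup mirrors FieldMap.Get: hash, modulo, then walk forward until the
// name matches or an empty slot (hash == 0) proves the name is absent.
func lookup(tab []entry, h uint64, name string) int {
	n := uint64(len(tab))
	for p := h % n; tab[p].hash != 0; p = (p + 1) % n {
		if tab[p].hash == h && tab[p].name == name {
			return tab[p].id
		}
	}
	return -1
}

func main() {
	tab := make([]entry, 8) // room for 4 entries at load factor 0.5
	h := uint64(5)          // stand-in for StrHash(name), which never returns 0
	tab[h%8] = entry{hash: h, name: "field", id: 3}
	fmt.Println(lookup(tab, h, "field")) // 3
	fmt.Println(lookup(tab, h, "other")) // -1
}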
diff --git a/vendor/github.com/bytedance/sonic/internal/caching/pcache.go b/vendor/github.com/bytedance/sonic/internal/caching/pcache.go
index 8c1224d9c..d5be70e72 100644
--- a/vendor/github.com/bytedance/sonic/internal/caching/pcache.go
+++ b/vendor/github.com/bytedance/sonic/internal/caching/pcache.go
@@ -17,157 +17,157 @@ package caching

 import (
-    `sync`
-    `sync/atomic`
-    `unsafe`
+	"sync"
+	"sync/atomic"
+	"unsafe"

-    `github.com/bytedance/sonic/internal/rt`
+	"github.com/bytedance/sonic/internal/rt"
 )

 /** Program Map **/

 const (
-    _LoadFactor   = 0.5
-    _InitCapacity = 4096 // must be a power of 2
+	_LoadFactor   = 0.5
+	_InitCapacity = 4096 // must be a power of 2
 )

 type _ProgramMap struct {
-    n uint64
-    m uint32
-    b []_ProgramEntry
+	n uint64
+	m uint32
+	b []_ProgramEntry
 }

 type _ProgramEntry struct {
-    vt *rt.GoType
-    fn interface{}
+	vt *rt.GoType
+	fn interface{}
 }

 func newProgramMap() *_ProgramMap {
-    return &_ProgramMap {
-        n: 0,
-        m: _InitCapacity - 1,
-        b: make([]_ProgramEntry, _InitCapacity),
-    }
+	return &_ProgramMap{
+		n: 0,
+		m: _InitCapacity - 1,
+		b: make([]_ProgramEntry, _InitCapacity),
+	}
 }

 func (self *_ProgramMap) copy() *_ProgramMap {
-    fork := &_ProgramMap{
-        n: self.n,
-        m: self.m,
-        b: make([]_ProgramEntry, len(self.b)),
-    }
-    for i, f := range self.b {
-        fork.b[i] = f
-    }
-    return fork
+	fork := &_ProgramMap{
+		n: self.n,
+		m: self.m,
+		b: make([]_ProgramEntry, len(self.b)),
+	}
+	for i, f := range self.b {
+		fork.b[i] = f
+	}
+	return fork
 }

 func (self *_ProgramMap) get(vt *rt.GoType) interface{} {
-    i := self.m + 1
-    p := vt.Hash & self.m
-
-    /* linear probing */
-    for ; i > 0; i-- {
-        if b := self.b[p]; b.vt == vt {
-            return b.fn
-        } else if b.vt == nil {
-            break
-        } else {
-            p = (p + 1) & self.m
-        }
-    }
-
-    /* not found */
-    return nil
+	i := self.m + 1
+	p := vt.Hash & self.m
+
+	/* linear probing */
+	for ; i > 0; i-- {
+		if b := self.b[p]; b.vt == vt {
+			return b.fn
+		} else if b.vt == nil {
+			break
+		} else {
+			p = (p + 1) & self.m
+		}
+	}
+
+	/* not found */
+	return nil
 }

 func (self *_ProgramMap) add(vt *rt.GoType, fn interface{}) *_ProgramMap {
-    p := self.copy()
-    f := float64(atomic.LoadUint64(&p.n) + 1) / float64(p.m + 1)
+	p := self.copy()
+	f := float64(atomic.LoadUint64(&p.n)+1) / float64(p.m+1)

-    /* check for load factor */
-    if f > _LoadFactor {
-        p = p.rehash()
-    }
+	/* check for load factor */
+	if f > _LoadFactor {
+		p = p.rehash()
+	}

-    /* insert the value */
-    p.insert(vt, fn)
-    return p
+	/* insert the value */
+	p.insert(vt, fn)
+	return p
 }

 func (self *_ProgramMap) rehash() *_ProgramMap {
-    c := (self.m + 1) << 1
-    r := &_ProgramMap{m: c - 1, b: make([]_ProgramEntry, int(c))}
-
-    /* rehash every entry */
-    for i := uint32(0); i <= self.m; i++ {
-        if b := self.b[i]; b.vt != nil {
-            r.insert(b.vt, b.fn)
-        }
-    }
-
-    /* rebuild successful */
-    return r
+	c := (self.m + 1) << 1
+	r := &_ProgramMap{m: c - 1, b: make([]_ProgramEntry, int(c))}
+
+	/* rehash every entry */
+	for i := uint32(0); i <= self.m; i++ {
+		if b := self.b[i]; b.vt != nil {
+			r.insert(b.vt, b.fn)
+		}
+	}
+
+	/* rebuild successful */
+	return r
 }

 func (self *_ProgramMap) insert(vt *rt.GoType, fn interface{}) {
-    h := vt.Hash
-    p := h & self.m
-
-    /* linear probing */
-    for i := uint32(0); i <= self.m; i++ {
-        if b := &self.b[p]; b.vt != nil {
-            p += 1
-            p &= self.m
-        } else {
-            b.vt = vt
-            b.fn = fn
-            atomic.AddUint64(&self.n, 1)
-            return
-        }
-    }
-
-    /* should never happen */
-    panic("no available slots")
+	h := vt.Hash
+	p := h & self.m
+
+	/* linear probing */
+	for i := uint32(0); i <= self.m; i++ {
+		if b := &self.b[p]; b.vt != nil {
+			p += 1
+			p &= self.m
+		} else {
+			b.vt = vt
+			b.fn = fn
+			atomic.AddUint64(&self.n, 1)
+			return
+		}
+	}
+
+	/* should never happen */
+	panic("no available slots")
 }

 /** RCU Program Cache **/

 type ProgramCache struct {
-    m sync.Mutex
-    p unsafe.Pointer
+	m sync.Mutex
+	p unsafe.Pointer
 }

 func CreateProgramCache() *ProgramCache {
-    return &ProgramCache {
-        m: sync.Mutex{},
-        p: unsafe.Pointer(newProgramMap()),
-    }
+	return &ProgramCache{
+		m: sync.Mutex{},
+		p: unsafe.Pointer(newProgramMap()),
+	}
 }

 func (self *ProgramCache) Get(vt *rt.GoType) interface{} {
-    return (*_ProgramMap)(atomic.LoadPointer(&self.p)).get(vt)
+	return (*_ProgramMap)(atomic.LoadPointer(&self.p)).get(vt)
 }
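ProgramCache.Get above is the read side of a read-copy-update scheme: readers take a single atomic pointer load and never lock, while the writer (Compute, in the next hunk) clones the table under a mutex and publishes the copy with an atomic store. A minimal sketch of the same pattern with a plain map standing in for the probing table (names hypothetical; atomic.Pointer needs Go 1.19+):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type rcuCache struct {
	mu sync.Mutex
	p  atomic.Pointer[map[string]int]
}

func (c *rcuCache) get(k string) (int, bool) {
	v, ok := (*c.p.Load())[k] // lock-free read
	return v, ok
}

func (c *rcuCache) put(k string, v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	old := *c.p.Load()
	fork := make(map[string]int, len(old)+1) // copy; never mutate the published map
	for key, val := range old {
		fork[key] = val
	}
	fork[k] = v
	c.p.Store(&fork) // publish the new version atomically
}

func main() {
	c := &rcuCache{}
	empty := map[string]int{}
	c.p.Store(&empty)
	c.put("a", 1)
	fmt.Println(c.get("a")) // 1 true
}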
-func (self *ProgramCache) Compute(vt *rt.GoType, compute func(*rt.GoType, ... interface{}) (interface{}, error), ex ...interface{}) (interface{}, error) {
-    var err error
-    var val interface{}
+func (self *ProgramCache) Compute(vt *rt.GoType, compute func(*rt.GoType, ...interface{}) (interface{}, error), ex ...interface{}) (interface{}, error) {
+	var err error
+	var val interface{}

-    /* use defer to prevent inlining of this function */
-    self.m.Lock()
-    defer self.m.Unlock()
+	/* use defer to prevent inlining of this function */
+	self.m.Lock()
+	defer self.m.Unlock()

-    /* double check with write lock held */
-    if val = self.Get(vt); val != nil {
-        return val, nil
-    }
+	/* double check with write lock held */
+	if val = self.Get(vt); val != nil {
+		return val, nil
+	}

-    /* compute the value */
-    if val, err = compute(vt, ex...); err != nil {
-        return nil, err
-    }
+	/* compute the value */
+	if val, err = compute(vt, ex...); err != nil {
+		return nil, err
+	}

-    /* update the RCU cache */
-    atomic.StorePointer(&self.p, unsafe.Pointer((*_ProgramMap)(atomic.LoadPointer(&self.p)).add(vt, val)))
-    return val, nil
+	/* update the RCU cache */
+	atomic.StorePointer(&self.p, unsafe.Pointer((*_ProgramMap)(atomic.LoadPointer(&self.p)).add(vt, val)))
+	return val, nil
 }
diff --git a/vendor/github.com/bytedance/sonic/internal/cpu/features.go b/vendor/github.com/bytedance/sonic/internal/cpu/features.go
index f9ee3b8f3..57158f7f0 100644
--- a/vendor/github.com/bytedance/sonic/internal/cpu/features.go
+++ b/vendor/github.com/bytedance/sonic/internal/cpu/features.go
@@ -17,24 +17,30 @@ package cpu

 import (
-    `fmt`
-    `os`
+	"fmt"
+	"os"

-    `github.com/klauspost/cpuid/v2`
+	"github.com/klauspost/cpuid/v2"
 )

 var (
-    HasAVX  = cpuid.CPU.Has(cpuid.AVX)
-    HasAVX2 = cpuid.CPU.Has(cpuid.AVX2)
-    HasSSE  = cpuid.CPU.Has(cpuid.SSE)
+	HasAVX  = cpuid.CPU.Has(cpuid.AVX)
+	HasAVX2 = cpuid.CPU.Has(cpuid.AVX2)
+	HasSSE  = cpuid.CPU.Has(cpuid.SSE)
 )

 func init() {
-    switch v := os.Getenv("SONIC_MODE"); v {
-    case ""      : break
-    case "auto"  : break
-    case "noavx" : HasAVX = false; fallthrough
-    case "noavx2": HasAVX2 = false
-    default      : panic(fmt.Sprintf("invalid mode: '%s', should be one of 'auto', 'noavx', 'noavx2'", v))
-    }
+	switch v := os.Getenv("SONIC_MODE"); v {
+	case "":
+		break
+	case "auto":
+		break
+	case "noavx":
+		HasAVX = false
+		fallthrough
+	case "noavx2":
+		HasAVX2 = false
+	default:
+		panic(fmt.Sprintf("invalid mode: '%s', should be one of 'auto', 'noavx', 'noavx2'", v))
+	}
 }
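Worth noting in the features.go hunk: because the `noavx` case falls through into `noavx2`, SONIC_MODE=noavx disables both AVX and AVX2, while SONIC_MODE=noavx2 disables only AVX2. A runnable sketch of that fallthrough behavior (mode strings mirror the vendored file; the function name is hypothetical):

package main

import "fmt"

func flags(mode string) (avx, avx2 bool) {
	avx, avx2 = true, true
	switch mode {
	case "", "auto":
	case "noavx":
		avx = false
		fallthrough // noavx implies noavx2
	case "noavx2":
		avx2 = false
	default:
		panic("invalid mode: " + mode)
	}
	return
}

func main() {
	fmt.Println(flags("noavx"))  // false false
	fmt.Println(flags("noavx2")) // true false
}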
"github.com/bytedance/sonic/internal/caching" + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" ) /** Register Allocations @@ -67,1889 +68,1886 @@ import ( */ const ( - _FP_args = 96 // 96 bytes to pass arguments and return values for this function - _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions - _FP_saves = 40 // 40 bytes for saving the registers before CALL instructions - _FP_locals = 144 // 144 bytes for local variables + _FP_args = 96 // 96 bytes to pass arguments and return values for this function + _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions + _FP_saves = 40 // 40 bytes for saving the registers before CALL instructions + _FP_locals = 144 // 144 bytes for local variables ) const ( - _FP_offs = _FP_fargs + _FP_saves + _FP_locals - _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer - _FP_base = _FP_size + 8 // 8 bytes for the return address + _FP_offs = _FP_fargs + _FP_saves + _FP_locals + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address ) const ( - _IM_null = 0x6c6c756e // 'null' - _IM_true = 0x65757274 // 'true' - _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f') + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f') ) const ( - _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') + _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') ) const ( - _MODE_JSON = 1 << 3 // base64 mode + _MODE_JSON = 1 << 3 // base64 mode ) const ( - _LB_error = "_error" - _LB_im_error = "_im_error" - _LB_eof_error = "_eof_error" - _LB_type_error = "_type_error" - _LB_field_error = "_field_error" - _LB_range_error = "_range_error" - _LB_stack_error = "_stack_error" - _LB_base64_error = "_base64_error" - _LB_unquote_error = "_unquote_error" - _LB_parsing_error = "_parsing_error" - _LB_parsing_error_v = "_parsing_error_v" - _LB_mismatch_error = "_mismatch_error" + _LB_error = "_error" + _LB_im_error = "_im_error" + _LB_eof_error = "_eof_error" + _LB_type_error = "_type_error" + _LB_field_error = "_field_error" + _LB_range_error = "_range_error" + _LB_stack_error = "_stack_error" + _LB_base64_error = "_base64_error" + _LB_unquote_error = "_unquote_error" + _LB_parsing_error = "_parsing_error" + _LB_parsing_error_v = "_parsing_error_v" + _LB_mismatch_error = "_mismatch_error" ) const ( - _LB_char_0_error = "_char_0_error" - _LB_char_1_error = "_char_1_error" - _LB_char_2_error = "_char_2_error" - _LB_char_3_error = "_char_3_error" - _LB_char_4_error = "_char_4_error" - _LB_char_m2_error = "_char_m2_error" - _LB_char_m3_error = "_char_m3_error" + _LB_char_0_error = "_char_0_error" + _LB_char_1_error = "_char_1_error" + _LB_char_2_error = "_char_2_error" + _LB_char_3_error = "_char_3_error" + _LB_char_4_error = "_char_4_error" + _LB_char_m2_error = "_char_m2_error" + _LB_char_m3_error = "_char_m3_error" ) const ( - _LB_skip_one = "_skip_one" - _LB_skip_key_value = "_skip_key_value" + _LB_skip_one = "_skip_one" + _LB_skip_key_value = "_skip_key_value" ) var ( - _AX = jit.Reg("AX") - _CX = jit.Reg("CX") - _DX = jit.Reg("DX") - _DI = jit.Reg("DI") - _SI = jit.Reg("SI") - _BP = jit.Reg("BP") - _SP = jit.Reg("SP") - _R8 = jit.Reg("R8") - _R9 = 
jit.Reg("R9") - _X0 = jit.Reg("X0") - _X1 = jit.Reg("X1") + _AX = jit.Reg("AX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") + _R9 = jit.Reg("R9") + _X0 = jit.Reg("X0") + _X1 = jit.Reg("X1") ) var ( - _ST = jit.Reg("BX") - _IP = jit.Reg("R12") - _IL = jit.Reg("R13") - _IC = jit.Reg("R14") - _VP = jit.Reg("R15") + _ST = jit.Reg("BX") + _IP = jit.Reg("R12") + _IL = jit.Reg("R13") + _IC = jit.Reg("R14") + _VP = jit.Reg("R15") ) var ( - _R10 = jit.Reg("R10") // used for gcWriteBarrier - _DF = jit.Reg("R10") // reuse R10 in generic decoder for flags - _ET = jit.Reg("R10") - _EP = jit.Reg("R11") + _R10 = jit.Reg("R10") // used for gcWriteBarrier + _DF = jit.Reg("R10") // reuse R10 in generic decoder for flags + _ET = jit.Reg("R10") + _EP = jit.Reg("R11") ) var ( - _ARG_s = _ARG_sp - _ARG_sp = jit.Ptr(_SP, _FP_base) - _ARG_sl = jit.Ptr(_SP, _FP_base + 8) - _ARG_ic = jit.Ptr(_SP, _FP_base + 16) - _ARG_vp = jit.Ptr(_SP, _FP_base + 24) - _ARG_sb = jit.Ptr(_SP, _FP_base + 32) - _ARG_fv = jit.Ptr(_SP, _FP_base + 40) + _ARG_s = _ARG_sp + _ARG_sp = jit.Ptr(_SP, _FP_base) + _ARG_sl = jit.Ptr(_SP, _FP_base+8) + _ARG_ic = jit.Ptr(_SP, _FP_base+16) + _ARG_vp = jit.Ptr(_SP, _FP_base+24) + _ARG_sb = jit.Ptr(_SP, _FP_base+32) + _ARG_fv = jit.Ptr(_SP, _FP_base+40) ) var ( - _VAR_sv = _VAR_sv_p - _VAR_sv_p = jit.Ptr(_SP, _FP_base + 48) - _VAR_sv_n = jit.Ptr(_SP, _FP_base + 56) - _VAR_vk = jit.Ptr(_SP, _FP_base + 64) + _VAR_sv = _VAR_sv_p + _VAR_sv_p = jit.Ptr(_SP, _FP_base+48) + _VAR_sv_n = jit.Ptr(_SP, _FP_base+56) + _VAR_vk = jit.Ptr(_SP, _FP_base+64) ) var ( - _RET_rc = jit.Ptr(_SP, _FP_base + 72) - _RET_et = jit.Ptr(_SP, _FP_base + 80) - _RET_ep = jit.Ptr(_SP, _FP_base + 88) + _RET_rc = jit.Ptr(_SP, _FP_base+72) + _RET_et = jit.Ptr(_SP, _FP_base+80) + _RET_ep = jit.Ptr(_SP, _FP_base+88) ) var ( - _VAR_st = _VAR_st_Vt - _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves) + _VAR_st = _VAR_st_Vt + _VAR_sr = jit.Ptr(_SP, _FP_fargs+_FP_saves) ) - var ( - _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0) - _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) - _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) - _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24) - _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32) - _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40) + _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs+_FP_saves+0) + _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs+_FP_saves+8) + _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs+_FP_saves+16) + _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs+_FP_saves+24) + _VAR_st_Db = jit.Ptr(_SP, _FP_fargs+_FP_saves+32) + _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs+_FP_saves+40) ) var ( - _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48) - _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56) - _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64) - _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72) - _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80) + _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs+_FP_saves+48) + _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs+_FP_saves+56) + _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs+_FP_saves+64) + _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs+_FP_saves+72) + _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs+_FP_saves+80) ) var ( - _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88) - _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96) - _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104) + _VAR_bs_p = jit.Ptr(_SP, _FP_fargs+_FP_saves+88) + _VAR_bs_n = jit.Ptr(_SP, _FP_fargs+_FP_saves+96) + _VAR_bs_LR = 
jit.Ptr(_SP, _FP_fargs+_FP_saves+104) ) -var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112) +var _VAR_fl = jit.Ptr(_SP, _FP_fargs+_FP_saves+112) var ( - _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save dismatched type - _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save dismatched position - _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save skip return pc + _VAR_et = jit.Ptr(_SP, _FP_fargs+_FP_saves+120) // save dismatched type + _VAR_ic = jit.Ptr(_SP, _FP_fargs+_FP_saves+128) // save dismatched position + _VAR_pc = jit.Ptr(_SP, _FP_fargs+_FP_saves+136) // save skip return pc ) type _Assembler struct { - jit.BaseAssembler - p _Program - name string + jit.BaseAssembler + p _Program + name string } func newAssembler(p _Program) *_Assembler { - return new(_Assembler).Init(p) + return new(_Assembler).Init(p) } /** Assembler Interface **/ func (self *_Assembler) Load() _Decoder { - return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) + return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) } func (self *_Assembler) Init(p _Program) *_Assembler { - self.p = p - self.BaseAssembler.Init(self.compile) - return self + self.p = p + self.BaseAssembler.Init(self.compile) + return self } func (self *_Assembler) compile() { - self.prologue() - self.instrs() - self.epilogue() - self.copy_string() - self.escape_string() - self.escape_string_twice() - self.skip_one() - self.skip_key_value() - self.mismatch_error() - self.type_error() - self.field_error() - self.range_error() - self.stack_error() - self.base64_error() - self.parsing_error() + self.prologue() + self.instrs() + self.epilogue() + self.copy_string() + self.escape_string() + self.escape_string_twice() + self.skip_one() + self.skip_key_value() + self.mismatch_error() + self.type_error() + self.field_error() + self.range_error() + self.stack_error() + self.base64_error() + self.parsing_error() } /** Assembler Stages **/ -var _OpFuncTab = [256]func(*_Assembler, *_Instr) { - _OP_any : (*_Assembler)._asm_OP_any, - _OP_dyn : (*_Assembler)._asm_OP_dyn, - _OP_str : (*_Assembler)._asm_OP_str, - _OP_bin : (*_Assembler)._asm_OP_bin, - _OP_bool : (*_Assembler)._asm_OP_bool, - _OP_num : (*_Assembler)._asm_OP_num, - _OP_i8 : (*_Assembler)._asm_OP_i8, - _OP_i16 : (*_Assembler)._asm_OP_i16, - _OP_i32 : (*_Assembler)._asm_OP_i32, - _OP_i64 : (*_Assembler)._asm_OP_i64, - _OP_u8 : (*_Assembler)._asm_OP_u8, - _OP_u16 : (*_Assembler)._asm_OP_u16, - _OP_u32 : (*_Assembler)._asm_OP_u32, - _OP_u64 : (*_Assembler)._asm_OP_u64, - _OP_f32 : (*_Assembler)._asm_OP_f32, - _OP_f64 : (*_Assembler)._asm_OP_f64, - _OP_unquote : (*_Assembler)._asm_OP_unquote, - _OP_nil_1 : (*_Assembler)._asm_OP_nil_1, - _OP_nil_2 : (*_Assembler)._asm_OP_nil_2, - _OP_nil_3 : (*_Assembler)._asm_OP_nil_3, - _OP_deref : (*_Assembler)._asm_OP_deref, - _OP_index : (*_Assembler)._asm_OP_index, - _OP_is_null : (*_Assembler)._asm_OP_is_null, - _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote, - _OP_map_init : (*_Assembler)._asm_OP_map_init, - _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8, - _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16, - _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32, - _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64, - _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8, - _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16, - _OP_map_key_u32 : (*_Assembler)._asm_OP_map_key_u32, - _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64, - 
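compile() above emits code by walking the instruction list and dispatching every opcode through the fixed 256-entry function table that the next hunk reformats; nil entries mark invalid opcodes. A plain-Go sketch of that dispatch shape (opcodes and methods are hypothetical):

package main

import "fmt"

type vm struct{ out []string }

type instr struct{ op uint8 }

// A fixed table indexed by opcode, nil entries invalid — the same shape as _OpFuncTab.
var opFuncTab = [256]func(*vm, *instr){
	0: (*vm).opNop,
	1: (*vm).opEmit,
}

func (v *vm) opNop(_ *instr)  {}
func (v *vm) opEmit(_ *instr) { v.out = append(v.out, "MOVQ") }

func (v *vm) instr(p *instr) {
	if fn := opFuncTab[p.op]; fn != nil {
		fn(v, p)
	} else {
		panic(fmt.Sprintf("invalid opcode: %d", p.op))
	}
}

func main() {
	v := &vm{}
	v.instr(&instr{op: 1})
	fmt.Println(v.out) // [MOVQ]
}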
 /** Assembler Stages **/

-var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
-    _OP_any              : (*_Assembler)._asm_OP_any,
-    _OP_dyn              : (*_Assembler)._asm_OP_dyn,
-    _OP_str              : (*_Assembler)._asm_OP_str,
-    _OP_bin              : (*_Assembler)._asm_OP_bin,
-    _OP_bool             : (*_Assembler)._asm_OP_bool,
-    _OP_num              : (*_Assembler)._asm_OP_num,
-    _OP_i8               : (*_Assembler)._asm_OP_i8,
-    _OP_i16              : (*_Assembler)._asm_OP_i16,
-    _OP_i32              : (*_Assembler)._asm_OP_i32,
-    _OP_i64              : (*_Assembler)._asm_OP_i64,
-    _OP_u8               : (*_Assembler)._asm_OP_u8,
-    _OP_u16              : (*_Assembler)._asm_OP_u16,
-    _OP_u32              : (*_Assembler)._asm_OP_u32,
-    _OP_u64              : (*_Assembler)._asm_OP_u64,
-    _OP_f32              : (*_Assembler)._asm_OP_f32,
-    _OP_f64              : (*_Assembler)._asm_OP_f64,
-    _OP_unquote          : (*_Assembler)._asm_OP_unquote,
-    _OP_nil_1            : (*_Assembler)._asm_OP_nil_1,
-    _OP_nil_2            : (*_Assembler)._asm_OP_nil_2,
-    _OP_nil_3            : (*_Assembler)._asm_OP_nil_3,
-    _OP_deref            : (*_Assembler)._asm_OP_deref,
-    _OP_index            : (*_Assembler)._asm_OP_index,
-    _OP_is_null          : (*_Assembler)._asm_OP_is_null,
-    _OP_is_null_quote    : (*_Assembler)._asm_OP_is_null_quote,
-    _OP_map_init         : (*_Assembler)._asm_OP_map_init,
-    _OP_map_key_i8       : (*_Assembler)._asm_OP_map_key_i8,
-    _OP_map_key_i16      : (*_Assembler)._asm_OP_map_key_i16,
-    _OP_map_key_i32      : (*_Assembler)._asm_OP_map_key_i32,
-    _OP_map_key_i64      : (*_Assembler)._asm_OP_map_key_i64,
-    _OP_map_key_u8       : (*_Assembler)._asm_OP_map_key_u8,
-    _OP_map_key_u16      : (*_Assembler)._asm_OP_map_key_u16,
-    _OP_map_key_u32      : (*_Assembler)._asm_OP_map_key_u32,
-    _OP_map_key_u64      : (*_Assembler)._asm_OP_map_key_u64,
-    _OP_map_key_f32      : (*_Assembler)._asm_OP_map_key_f32,
-    _OP_map_key_f64      : (*_Assembler)._asm_OP_map_key_f64,
-    _OP_map_key_str      : (*_Assembler)._asm_OP_map_key_str,
-    _OP_map_key_utext    : (*_Assembler)._asm_OP_map_key_utext,
-    _OP_map_key_utext_p  : (*_Assembler)._asm_OP_map_key_utext_p,
-    _OP_array_skip       : (*_Assembler)._asm_OP_array_skip,
-    _OP_array_clear      : (*_Assembler)._asm_OP_array_clear,
-    _OP_array_clear_p    : (*_Assembler)._asm_OP_array_clear_p,
-    _OP_slice_init       : (*_Assembler)._asm_OP_slice_init,
-    _OP_slice_append     : (*_Assembler)._asm_OP_slice_append,
-    _OP_object_skip      : (*_Assembler)._asm_OP_object_skip,
-    _OP_object_next      : (*_Assembler)._asm_OP_object_next,
-    _OP_struct_field     : (*_Assembler)._asm_OP_struct_field,
-    _OP_unmarshal        : (*_Assembler)._asm_OP_unmarshal,
-    _OP_unmarshal_p      : (*_Assembler)._asm_OP_unmarshal_p,
-    _OP_unmarshal_text   : (*_Assembler)._asm_OP_unmarshal_text,
-    _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p,
-    _OP_lspace           : (*_Assembler)._asm_OP_lspace,
-    _OP_match_char       : (*_Assembler)._asm_OP_match_char,
-    _OP_check_char       : (*_Assembler)._asm_OP_check_char,
-    _OP_load             : (*_Assembler)._asm_OP_load,
-    _OP_save             : (*_Assembler)._asm_OP_save,
-    _OP_drop             : (*_Assembler)._asm_OP_drop,
-    _OP_drop_2           : (*_Assembler)._asm_OP_drop_2,
-    _OP_recurse          : (*_Assembler)._asm_OP_recurse,
-    _OP_goto             : (*_Assembler)._asm_OP_goto,
-    _OP_switch           : (*_Assembler)._asm_OP_switch,
-    _OP_check_char_0     : (*_Assembler)._asm_OP_check_char_0,
-    _OP_dismatch_err     : (*_Assembler)._asm_OP_dismatch_err,
-    _OP_go_skip          : (*_Assembler)._asm_OP_go_skip,
-    _OP_add              : (*_Assembler)._asm_OP_add,
-    _OP_check_empty      : (*_Assembler)._asm_OP_check_empty,
+var _OpFuncTab = [256]func(*_Assembler, *_Instr){
+	_OP_any:              (*_Assembler)._asm_OP_any,
+	_OP_dyn:              (*_Assembler)._asm_OP_dyn,
+	_OP_str:              (*_Assembler)._asm_OP_str,
+	_OP_bin:              (*_Assembler)._asm_OP_bin,
+	_OP_bool:             (*_Assembler)._asm_OP_bool,
+	_OP_num:              (*_Assembler)._asm_OP_num,
+	_OP_i8:               (*_Assembler)._asm_OP_i8,
+	_OP_i16:              (*_Assembler)._asm_OP_i16,
+	_OP_i32:              (*_Assembler)._asm_OP_i32,
+	_OP_i64:              (*_Assembler)._asm_OP_i64,
+	_OP_u8:               (*_Assembler)._asm_OP_u8,
+	_OP_u16:              (*_Assembler)._asm_OP_u16,
+	_OP_u32:              (*_Assembler)._asm_OP_u32,
+	_OP_u64:              (*_Assembler)._asm_OP_u64,
+	_OP_f32:              (*_Assembler)._asm_OP_f32,
+	_OP_f64:              (*_Assembler)._asm_OP_f64,
+	_OP_unquote:          (*_Assembler)._asm_OP_unquote,
+	_OP_nil_1:            (*_Assembler)._asm_OP_nil_1,
+	_OP_nil_2:            (*_Assembler)._asm_OP_nil_2,
+	_OP_nil_3:            (*_Assembler)._asm_OP_nil_3,
+	_OP_deref:            (*_Assembler)._asm_OP_deref,
+	_OP_index:            (*_Assembler)._asm_OP_index,
+	_OP_is_null:          (*_Assembler)._asm_OP_is_null,
+	_OP_is_null_quote:    (*_Assembler)._asm_OP_is_null_quote,
+	_OP_map_init:         (*_Assembler)._asm_OP_map_init,
+	_OP_map_key_i8:       (*_Assembler)._asm_OP_map_key_i8,
+	_OP_map_key_i16:      (*_Assembler)._asm_OP_map_key_i16,
+	_OP_map_key_i32:      (*_Assembler)._asm_OP_map_key_i32,
+	_OP_map_key_i64:      (*_Assembler)._asm_OP_map_key_i64,
+	_OP_map_key_u8:       (*_Assembler)._asm_OP_map_key_u8,
+	_OP_map_key_u16:      (*_Assembler)._asm_OP_map_key_u16,
+	_OP_map_key_u32:      (*_Assembler)._asm_OP_map_key_u32,
+	_OP_map_key_u64:      (*_Assembler)._asm_OP_map_key_u64,
+	_OP_map_key_f32:      (*_Assembler)._asm_OP_map_key_f32,
+	_OP_map_key_f64:      (*_Assembler)._asm_OP_map_key_f64,
+	_OP_map_key_str:      (*_Assembler)._asm_OP_map_key_str,
+	_OP_map_key_utext:    (*_Assembler)._asm_OP_map_key_utext,
+	_OP_map_key_utext_p:  (*_Assembler)._asm_OP_map_key_utext_p,
+	_OP_array_skip:       (*_Assembler)._asm_OP_array_skip,
+	_OP_array_clear:      (*_Assembler)._asm_OP_array_clear,
+	_OP_array_clear_p:    (*_Assembler)._asm_OP_array_clear_p,
+	_OP_slice_init:       (*_Assembler)._asm_OP_slice_init,
+	_OP_slice_append:     (*_Assembler)._asm_OP_slice_append,
+	_OP_object_skip:      (*_Assembler)._asm_OP_object_skip,
+	_OP_object_next:      (*_Assembler)._asm_OP_object_next,
+	_OP_struct_field:     (*_Assembler)._asm_OP_struct_field,
+	_OP_unmarshal:        (*_Assembler)._asm_OP_unmarshal,
+	_OP_unmarshal_p:      (*_Assembler)._asm_OP_unmarshal_p,
+	_OP_unmarshal_text:   (*_Assembler)._asm_OP_unmarshal_text,
+	_OP_unmarshal_text_p: (*_Assembler)._asm_OP_unmarshal_text_p,
+	_OP_lspace:           (*_Assembler)._asm_OP_lspace,
+	_OP_match_char:       (*_Assembler)._asm_OP_match_char,
+	_OP_check_char:       (*_Assembler)._asm_OP_check_char,
+	_OP_load:             (*_Assembler)._asm_OP_load,
+	_OP_save:             (*_Assembler)._asm_OP_save,
+	_OP_drop:             (*_Assembler)._asm_OP_drop,
+	_OP_drop_2:           (*_Assembler)._asm_OP_drop_2,
+	_OP_recurse:          (*_Assembler)._asm_OP_recurse,
+	_OP_goto:             (*_Assembler)._asm_OP_goto,
+	_OP_switch:           (*_Assembler)._asm_OP_switch,
+	_OP_check_char_0:     (*_Assembler)._asm_OP_check_char_0,
+	_OP_dismatch_err:     (*_Assembler)._asm_OP_dismatch_err,
+	_OP_go_skip:          (*_Assembler)._asm_OP_go_skip,
+	_OP_add:              (*_Assembler)._asm_OP_add,
+	_OP_check_empty:      (*_Assembler)._asm_OP_check_empty,
 }

 func (self *_Assembler) instr(v *_Instr) {
-    if fn := _OpFuncTab[v.op()]; fn != nil {
-        fn(self, v)
-    } else {
-        panic(fmt.Sprintf("invalid opcode: %d", v.op()))
-    }
+	if fn := _OpFuncTab[v.op()]; fn != nil {
+		fn(self, v)
+	} else {
+		panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+	}
 }

 func (self *_Assembler) instrs() {
-    for i, v := range self.p {
-        self.Mark(i)
-        self.instr(&v)
-        self.debug_instr(i, &v)
-    }
+	for i, v := range self.p {
+		self.Mark(i)
+		self.instr(&v)
+		self.debug_instr(i, &v)
+	}
 }

 func (self *_Assembler) epilogue() {
-    self.Mark(len(self.p))
-    self.Emit("XORL", _EP, _EP)                     // XORL EP, EP
-    self.Emit("MOVQ", _VAR_et, _ET)                 // MOVQ VAR_et, ET
-    self.Emit("TESTQ", _ET, _ET)                    // TESTQ ET, ET
-    self.Sjmp("JNZ", _LB_mismatch_error)            // JNZ _LB_mismatch_error
-    self.Link(_LB_error)                            // _error:
-    self.Emit("MOVQ", _IC, _RET_rc)                 // MOVQ IC, rc<>+40(FP)
-    self.Emit("MOVQ", _ET, _RET_et)                 // MOVQ ET, et<>+48(FP)
-    self.Emit("MOVQ", _EP, _RET_ep)                 // MOVQ EP, ep<>+56(FP)
-    self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP)  // MOVQ _FP_offs(SP), BP
-    self.Emit("ADDQ", jit.Imm(_FP_size), _SP)       // ADDQ $_FP_size, SP
-    self.Emit("RET")                                // RET
+	self.Mark(len(self.p))
+	self.Emit("XORL", _EP, _EP)                    // XORL EP, EP
+	self.Emit("MOVQ", _VAR_et, _ET)                // MOVQ VAR_et, ET
+	self.Emit("TESTQ", _ET, _ET)                   // TESTQ ET, ET
+	self.Sjmp("JNZ", _LB_mismatch_error)           // JNZ _LB_mismatch_error
+	self.Link(_LB_error)                           // _error:
+	self.Emit("MOVQ", _IC, _RET_rc)                // MOVQ IC, rc<>+40(FP)
+	self.Emit("MOVQ", _ET, _RET_et)                // MOVQ ET, et<>+48(FP)
+	self.Emit("MOVQ", _EP, _RET_ep)                // MOVQ EP, ep<>+56(FP)
+	self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+	self.Emit("ADDQ", jit.Imm(_FP_size), _SP)      // ADDQ $_FP_size, SP
+	self.Emit("RET")                               // RET
 }

 func (self *_Assembler) prologue() {
-    self.Emit("SUBQ", jit.Imm(_FP_size), _SP)       // SUBQ $_FP_size, SP
-    self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
-    self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
-    self.Emit("MOVQ", _ARG_sp, _IP)                // MOVQ s.p<>+0(FP), IP
-    self.Emit("MOVQ", _ARG_sl, _IL)                // MOVQ s.l<>+8(FP), IL
-    self.Emit("MOVQ", _ARG_ic, _IC)                // MOVQ ic<>+16(FP), IC
-    self.Emit("MOVQ", _ARG_vp, _VP)                // MOVQ vp<>+24(FP), VP
-    self.Emit("MOVQ", _ARG_sb, _ST)                // MOVQ vp<>+32(FP), ST
-    // initialize digital buffer first
-    self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc)   // MOVQ $_MaxDigitNums, ss.Dcap
-    self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX)       // LEAQ _DbufOffset(ST), AX
-    self.Emit("MOVQ", _AX, _VAR_st_Db)                      // MOVQ AX, ss.Dbuf
-    self.Emit("XORL", _AX, _AX)                             // XORL AX, AX
-    self.Emit("MOVQ", _AX, _VAR_et)                         // MOVQ AX, ss.Dp
+	self.Emit("SUBQ", jit.Imm(_FP_size), _SP)      // SUBQ $_FP_size, SP
+	self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
+	self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
+	self.Emit("MOVQ", _ARG_sp, _IP)                // MOVQ s.p<>+0(FP), IP
+	self.Emit("MOVQ", _ARG_sl, _IL)                // MOVQ s.l<>+8(FP), IL
+	self.Emit("MOVQ", _ARG_ic, _IC)                // MOVQ ic<>+16(FP), IC
+	self.Emit("MOVQ", _ARG_vp, _VP)                // MOVQ vp<>+24(FP), VP
+	self.Emit("MOVQ", _ARG_sb, _ST)                // MOVQ sb<>+32(FP), ST
+	// initialize digital buffer first
+	self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap
+	self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX)     // LEAQ _DbufOffset(ST), AX
+	self.Emit("MOVQ", _AX, _VAR_st_Db)                    // MOVQ AX, ss.Dbuf
+	self.Emit("XORL", _AX, _AX)                           // XORL AX, AX
+	self.Emit("MOVQ", _AX, _VAR_et)                       // MOVQ AX, VAR_et
 }
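The prologue/epilogue pair above reserves one fixed frame whose layout follows entirely from the _FP_* constants at the top of this file; spelled out as a worked example (values taken from those constants):

package main

import "fmt"

const (
	fpFargs  = 80  // outgoing call arguments
	fpSaves  = 40  // register save area
	fpLocals = 144 // local variables

	fpOffs = fpFargs + fpSaves + fpLocals // 264: where BP is spilled
	fpSize = fpOffs + 8                   // 272: whole frame incl. parent BP
	fpBase = fpSize + 8                   // 280: first argument, past the return address
)

func main() {
	// hence _ARG_sp = 280(SP), _ARG_sl = 288(SP), ... as in the var block above
	fmt.Println(fpOffs, fpSize, fpBase) // 264 272 280
}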
 /** Function Calling Helpers **/

-var _REG_go = []obj.Addr {
-    _ST,
-    _VP,
-    _IP,
-    _IL,
-    _IC,
+var _REG_go = []obj.Addr{
+	_ST,
+	_VP,
+	_IP,
+	_IL,
+	_IC,
 }

 func (self *_Assembler) save(r ...obj.Addr) {
-    for i, v := range r {
-        if i > _FP_saves / 8 - 1 {
-            panic("too many registers to save")
-        } else {
-            self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
-        }
-    }
+	for i, v := range r {
+		if i > _FP_saves/8-1 {
+			panic("too many registers to save")
+		} else {
+			self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs+int64(i)*8))
+		}
+	}
 }

 func (self *_Assembler) load(r ...obj.Addr) {
-    for i, v := range r {
-        if i > _FP_saves / 8 - 1 {
-            panic("too many registers to load")
-        } else {
-            self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
-        }
-    }
+	for i, v := range r {
+		if i > _FP_saves/8-1 {
+			panic("too many registers to load")
+		} else {
+			self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs+int64(i)*8), v)
+		}
+	}
 }

 func (self *_Assembler) call(fn obj.Addr) {
-    self.Emit("MOVQ", fn, _AX)  // MOVQ ${fn}, AX
-    self.Rjmp("CALL", _AX)      // CALL AX
+	self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX
+	self.Rjmp("CALL", _AX)     // CALL AX
 }

 func (self *_Assembler) call_go(fn obj.Addr) {
-    self.save(_REG_go...)   // SAVE $REG_go
-    self.call(fn)           // CALL ${fn}
-    self.load(_REG_go...)   // LOAD $REG_go
+	self.save(_REG_go...) // SAVE $REG_go
+	self.call(fn)         // CALL ${fn}
+	self.load(_REG_go...) // LOAD $REG_go
 }

 func (self *_Assembler) call_sf(fn obj.Addr) {
-    self.Emit("LEAQ", _ARG_s, _DI)                      // LEAQ s<>+0(FP), DI
-    self.Emit("MOVQ", _IC, _ARG_ic)                     // MOVQ IC, ic<>+16(FP)
-    self.Emit("LEAQ", _ARG_ic, _SI)                     // LEAQ ic<>+16(FP), SI
-    self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX)    // LEAQ _FsmOffset(ST), DX
-    self.Emit("MOVQ", _ARG_fv, _CX)
-    self.call(fn)                                       // CALL ${fn}
-    self.Emit("MOVQ", _ARG_ic, _IC)                     // MOVQ ic<>+16(FP), IC
+	self.Emit("LEAQ", _ARG_s, _DI)                   // LEAQ s<>+0(FP), DI
+	self.Emit("MOVQ", _IC, _ARG_ic)                  // MOVQ IC, ic<>+16(FP)
+	self.Emit("LEAQ", _ARG_ic, _SI)                  // LEAQ ic<>+16(FP), SI
+	self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX
+	self.Emit("MOVQ", _ARG_fv, _CX)
+	self.call(fn)                   // CALL ${fn}
+	self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
 }

 func (self *_Assembler) call_vf(fn obj.Addr) {
-    self.Emit("LEAQ", _ARG_s, _DI)      // LEAQ s<>+0(FP), DI
-    self.Emit("MOVQ", _IC, _ARG_ic)     // MOVQ IC, ic<>+16(FP)
-    self.Emit("LEAQ", _ARG_ic, _SI)     // LEAQ ic<>+16(FP), SI
-    self.Emit("LEAQ", _VAR_st, _DX)     // LEAQ st, DX
-    self.call(fn)                       // CALL ${fn}
-    self.Emit("MOVQ", _ARG_ic, _IC)     // MOVQ ic<>+16(FP), IC
+	self.Emit("LEAQ", _ARG_s, _DI)  // LEAQ s<>+0(FP), DI
+	self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+	self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+	self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX
+	self.call(fn)                   // CALL ${fn}
+	self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
 }

 /** Assembler Error Handlers **/

 var (
-    _F_convT64        = jit.Func(convT64)
-    _F_error_wrap     = jit.Func(error_wrap)
-    _F_error_type     = jit.Func(error_type)
-    _F_error_field    = jit.Func(error_field)
-    _F_error_value    = jit.Func(error_value)
-    _F_error_mismatch = jit.Func(error_mismatch)
+	_F_convT64        = jit.Func(convT64)
+	_F_error_wrap     = jit.Func(error_wrap)
+	_F_error_type     = jit.Func(error_type)
+	_F_error_field    = jit.Func(error_field)
+	_F_error_value    = jit.Func(error_value)
+	_F_error_mismatch = jit.Func(error_mismatch)
 )

 var (
-    _I_int8    , _T_int8    = rtype(reflect.TypeOf(int8(0)))
-    _I_int16   , _T_int16   = rtype(reflect.TypeOf(int16(0)))
-    _I_int32   , _T_int32   = rtype(reflect.TypeOf(int32(0)))
-    _I_uint8   , _T_uint8   = rtype(reflect.TypeOf(uint8(0)))
-    _I_uint16  , _T_uint16  = rtype(reflect.TypeOf(uint16(0)))
-    _I_uint32  , _T_uint32  = rtype(reflect.TypeOf(uint32(0)))
-    _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0)))
+	_I_int8, _T_int8       = rtype(reflect.TypeOf(int8(0)))
+	_I_int16, _T_int16     = rtype(reflect.TypeOf(int16(0)))
+	_I_int32, _T_int32     = rtype(reflect.TypeOf(int32(0)))
+	_I_uint8, _T_uint8     = rtype(reflect.TypeOf(uint8(0)))
+	_I_uint16, _T_uint16   = rtype(reflect.TypeOf(uint16(0)))
+	_I_uint32, _T_uint32   = rtype(reflect.TypeOf(uint32(0)))
+	_I_float32, _T_float32 = rtype(reflect.TypeOf(float32(0)))
 )

 var (
-    _T_error                    = rt.UnpackType(errorType)
-    _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError)
+	_T_error                    = rt.UnpackType(errorType)
+	_I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError)
 )

 var (
-    _V_stackOverflow              = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
-    _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
-    _I_json_MismatchTypeError     = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError)))
+	_V_stackOverflow              = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
+	_I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
+	_I_json_MismatchTypeError     = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError)))
 )

 func (self *_Assembler) type_error() {
-    self.Link(_LB_type_error)                   // _type_error:
-    self.Emit("MOVQ", _ET, jit.Ptr(_SP, 0))     // MOVQ ET, (SP)
-    self.call_go(_F_error_type)                 // CALL_GO error_type
-    self.Emit("MOVQ", jit.Ptr(_SP, 8), _ET)     // MOVQ 8(SP), ET
-    self.Emit("MOVQ", jit.Ptr(_SP, 16), _EP)    // MOVQ 16(SP), EP
-    self.Sjmp("JMP" , _LB_error)                // JMP _error
+	self.Link(_LB_type_error)                // _type_error:
+	self.Emit("MOVQ", _ET, jit.Ptr(_SP, 0))  // MOVQ ET, (SP)
+	self.call_go(_F_error_type)              // CALL_GO error_type
+	self.Emit("MOVQ", jit.Ptr(_SP, 8), _ET)  // MOVQ 8(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 16), _EP) // MOVQ 16(SP), EP
+	self.Sjmp("JMP", _LB_error)              // JMP _error
 }

-
 func (self *_Assembler) mismatch_error() {
-    self.Link(_LB_mismatch_error)                       // _type_error:
-    self.Emit("MOVQ", _VAR_et, _ET)                     // MOVQ _VAR_et, ET
-    self.Emit("MOVQ", _VAR_ic, _EP)                     // MOVQ _VAR_ic, EP
-    self.Emit("MOVQ", _I_json_MismatchTypeError, _AX)   // MOVQ _I_json_MismatchTypeError, AX
-    self.Emit("CMPQ", _ET, _AX)                         // CMPQ ET, AX
-    self.Sjmp("JE"  , _LB_error)                        // JE _LB_error
-    self.Emit("MOVQ", _ARG_sp, _AX)
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0))             // MOVQ AX, (SP)
-    self.Emit("MOVQ", _ARG_sl, _CX)
-    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8))             // MOVQ CX, 8(SP)
-    self.Emit("MOVQ", _VAR_ic, _AX)
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))            // MOVQ AX, 16(SP)
-    self.Emit("MOVQ", _VAR_et, _CX)
-    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24))            // MOVQ CX, 24(SP)
-    self.call_go(_F_error_mismatch)                     // CALL_GO error_mismatch
-    self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET)            // MOVQ 32(SP), ET
-    self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP)            // MOVQ 40(SP), EP
-    self.Sjmp("JMP" , _LB_error)                        // JMP _error
+	self.Link(_LB_mismatch_error)                     // _mismatch_error:
+	self.Emit("MOVQ", _VAR_et, _ET)                   // MOVQ _VAR_et, ET
+	self.Emit("MOVQ", _VAR_ic, _EP)                   // MOVQ _VAR_ic, EP
+	self.Emit("MOVQ", _I_json_MismatchTypeError, _AX) // MOVQ _I_json_MismatchTypeError, AX
+	self.Emit("CMPQ", _ET, _AX)                       // CMPQ ET, AX
+	self.Sjmp("JE", _LB_error)                        // JE _LB_error
+	self.Emit("MOVQ", _ARG_sp, _AX)
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+	self.Emit("MOVQ", _ARG_sl, _CX)
+	self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+	self.Emit("MOVQ", _VAR_ic, _AX)
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+	self.Emit("MOVQ", _VAR_et, _CX)
+	self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP)
+	self.call_go(_F_error_mismatch)          // CALL_GO error_mismatch
+	self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+	self.Sjmp("JMP", _LB_error)              // JMP _error
 }

 func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) {
-    self.Emit("MOVQ", _IC, _VAR_ic)
-    self.Emit("MOVQ", jit.Type(p.vt()), _ET)
-    self.Emit("MOVQ", _ET, _VAR_et)
+	self.Emit("MOVQ", _IC, _VAR_ic)
+	self.Emit("MOVQ", jit.Type(p.vt()), _ET)
+	self.Emit("MOVQ", _ET, _VAR_et)
 }

 func (self *_Assembler) _asm_OP_go_skip(p *_Instr) {
-    self.Byte(0x4c, 0x8d, 0x0d)         // LEAQ (PC), R9
-    self.Xref(p.vi(), 4)
-    self.Emit("MOVQ", _R9, _VAR_pc)
-    self.Sjmp("JMP"  , _LB_skip_one)    // JMP _skip_one
+	self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+	self.Xref(p.vi(), 4)
+	self.Emit("MOVQ", _R9, _VAR_pc)
+	self.Sjmp("JMP", _LB_skip_one) // JMP _skip_one
 }

 func (self *_Assembler) skip_one() {
-    self.Link(_LB_skip_one)                     // _skip:
-    self.Emit("MOVQ", _VAR_ic, _IC)             // MOVQ _VAR_ic, IC
-    self.call_sf(_F_skip_one)                   // CALL_SF skip_one
-    self.Emit("TESTQ", _AX, _AX)                // TESTQ AX, AX
-    self.Sjmp("JS"   , _LB_parsing_error_v)     // JS _parse_error_v
-    self.Emit("MOVQ" , _VAR_pc, _R9)            // MOVQ pc, R9
-    self.Rjmp("JMP"  , _R9)                     // JMP (R9)
+	self.Link(_LB_skip_one)              // _skip_one:
+	self.Emit("MOVQ", _VAR_ic, _IC)      // MOVQ _VAR_ic, IC
+	self.call_sf(_F_skip_one)            // CALL_SF skip_one
+	self.Emit("TESTQ", _AX, _AX)         // TESTQ AX, AX
+	self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v
+	self.Emit("MOVQ", _VAR_pc, _R9)      // MOVQ pc, R9
+	self.Rjmp("JMP", _R9)                // JMP (R9)
 }

-
 func (self *_Assembler) skip_key_value() {
-    self.Link(_LB_skip_key_value)               // _skip:
-    // skip the key
-    self.Emit("MOVQ", _VAR_ic, _IC)             // MOVQ _VAR_ic, IC
-    self.call_sf(_F_skip_one)                   // CALL_SF skip_one
-    self.Emit("TESTQ", _AX, _AX)                // TESTQ AX, AX
-    self.Sjmp("JS"   , _LB_parsing_error_v)     // JS _parse_error_v
-    // match char ':'
-    self.lspace("_global_1")
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':'))
-    self.Sjmp("JNE"  , _LB_parsing_error_v)     // JNE _parse_error_v
-    self.Emit("ADDQ", jit.Imm(1), _IC)          // ADDQ $1, IC
-    self.lspace("_global_2")
-    // skip the value
-    self.call_sf(_F_skip_one)                   // CALL_SF skip_one
-    self.Emit("TESTQ", _AX, _AX)                // TESTQ AX, AX
-    self.Sjmp("JS"   , _LB_parsing_error_v)     // JS _parse_error_v
-    // jump back to specified address
-    self.Emit("MOVQ" , _VAR_pc, _R9)            // MOVQ pc, R9
-    self.Rjmp("JMP"  , _R9)                     // JMP (R9)
+	self.Link(_LB_skip_key_value) // _skip_key_value:
+	// skip the key
+	self.Emit("MOVQ", _VAR_ic, _IC)      // MOVQ _VAR_ic, IC
+	self.call_sf(_F_skip_one)            // CALL_SF skip_one
+	self.Emit("TESTQ", _AX, _AX)         // TESTQ AX, AX
+	self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v
+	// match char ':'
+	self.lspace("_global_1")
+	self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':'))
+	self.Sjmp("JNE", _LB_parsing_error_v) // JNE _parse_error_v
+	self.Emit("ADDQ", jit.Imm(1), _IC)    // ADDQ $1, IC
+	self.lspace("_global_2")
+	// skip the value
+	self.call_sf(_F_skip_one)            // CALL_SF skip_one
+	self.Emit("TESTQ", _AX, _AX)         // TESTQ AX, AX
+	self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v
+	// jump back to specified address
+	self.Emit("MOVQ", _VAR_pc, _R9) // MOVQ pc, R9
+	self.Rjmp("JMP", _R9)           // JMP (R9)
 }

 func (self *_Assembler) field_error() {
-    self.Link(_LB_field_error)                  // _field_error:
-    self.Emit("MOVOU", _VAR_sv, _X0)            // MOVOU sv, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0))    // MOVOU X0, (SP)
-    self.call_go(_F_error_field)                // CALL_GO error_field
-    self.Emit("MOVQ" , jit.Ptr(_SP, 16), _ET)   // MOVQ 16(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 24), _EP)   // MOVQ 24(SP), EP
-    self.Sjmp("JMP"  , _LB_error)               // JMP _error
+	self.Link(_LB_field_error)               // _field_error:
+	self.Emit("MOVOU", _VAR_sv, _X0)         // MOVOU sv, X0
+	self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
+	self.call_go(_F_error_field)             // CALL_GO error_field
+	self.Emit("MOVQ", jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP
+	self.Sjmp("JMP", _LB_error)              // JMP _error
 }

 func (self *_Assembler) range_error() {
-    self.Link(_LB_range_error)                  // _range_error:
-    self.slice_from(_VAR_st_Ep, 0)              // SLICE st.Ep, $0
-    self.Emit("MOVQ", _DI, jit.Ptr(_SP, 0))     // MOVQ DI, (SP)
-    self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8))     // MOVQ SI, 8(SP)
-    self.Emit("MOVQ", _ET, jit.Ptr(_SP, 16))    // MOVQ ET, 16(SP)
-    self.Emit("MOVQ", _EP, jit.Ptr(_SP, 24))    // MOVQ EP, 24(SP)
-    self.call_go(_F_error_value)                // CALL_GO error_value
-    self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET)    // MOVQ 32(SP), ET
-    self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP)    // MOVQ 40(SP), EP
-    self.Sjmp("JMP" , _LB_error)                // JMP _error
+	self.Link(_LB_range_error)               // _range_error:
+	self.slice_from(_VAR_st_Ep, 0)           // SLICE st.Ep, $0
+	self.Emit("MOVQ", _DI, jit.Ptr(_SP, 0))  // MOVQ DI, (SP)
+	self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8))  // MOVQ SI, 8(SP)
+	self.Emit("MOVQ", _ET, jit.Ptr(_SP, 16)) // MOVQ ET, 16(SP)
+	self.Emit("MOVQ", _EP, jit.Ptr(_SP, 24)) // MOVQ EP, 24(SP)
+	self.call_go(_F_error_value)             // CALL_GO error_value
+	self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+	self.Sjmp("JMP", _LB_error)              // JMP _error
 }

 func (self *_Assembler) stack_error() {
-    self.Link(_LB_stack_error)                              // _stack_error:
-    self.Emit("MOVQ", _V_stackOverflow, _EP)                // MOVQ ${_V_stackOverflow}, EP
-    self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET)   // MOVQ ${_I_json_UnsupportedValueError}, ET
-    self.Sjmp("JMP" , _LB_error)                            // JMP _error
+	self.Link(_LB_stack_error)                            // _stack_error:
+	self.Emit("MOVQ", _V_stackOverflow, _EP)              // MOVQ ${_V_stackOverflow}, EP
+	self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET
+	self.Sjmp("JMP", _LB_error)                           // JMP _error
 }

 func (self *_Assembler) base64_error() {
-    self.Link(_LB_base64_error)
-    self.Emit("NEGQ", _AX)                                  // NEGQ AX
-    self.Emit("SUBQ", jit.Imm(1), _AX)                      // SUBQ $1, AX
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0))                 // MOVQ AX, (SP)
-    self.call_go(_F_convT64)                                // CALL_GO convT64
-    self.Emit("MOVQ", jit.Ptr(_SP, 8), _EP)                 // MOVQ 8(SP), EP
-    self.Emit("MOVQ", _I_base64_CorruptInputError, _ET)     // MOVQ ${itab(base64.CorruptInputError)}, ET
-    self.Sjmp("JMP" , _LB_error)                            // JMP _error
+	self.Link(_LB_base64_error)
+	self.Emit("NEGQ", _AX)                              // NEGQ AX
+	self.Emit("SUBQ", jit.Imm(1), _AX)                  // SUBQ $1, AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0))             // MOVQ AX, (SP)
+	self.call_go(_F_convT64)                            // CALL_GO convT64
+	self.Emit("MOVQ", jit.Ptr(_SP, 8), _EP)             // MOVQ 8(SP), EP
+	self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET
+	self.Sjmp("JMP", _LB_error)                         // JMP _error
 }

 func (self *_Assembler) parsing_error() {
-    self.Link(_LB_eof_error)                                            // _eof_error:
-    self.Emit("MOVQ" , _IL, _IC)                                        // MOVQ IL, IC
-    self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP)              // MOVL ${types.ERR_EOF}, EP
-    self.Sjmp("JMP"  , _LB_parsing_error)                               // JMP _parsing_error
-    self.Link(_LB_unquote_error)                                        // _unquote_error:
-    self.Emit("SUBQ" , _VAR_sr, _SI)                                    // SUBQ sr, SI
-    self.Emit("SUBQ" , _SI, _IC)                                        // SUBQ SI, IC
-    self.Link(_LB_parsing_error_v)                                      // _parsing_error_v:
-    self.Emit("MOVQ" , _AX, _EP)                                        // MOVQ AX, EP
-    self.Emit("NEGQ" , _EP)                                             // NEGQ EP
-    self.Sjmp("JMP"  , _LB_parsing_error)                               // JMP _parsing_error
-    self.Link(_LB_char_m3_error)                                        // _char_m3_error:
-    self.Emit("SUBQ" , jit.Imm(1), _IC)                                 // SUBQ $1, IC
-    self.Link(_LB_char_m2_error)                                        // _char_m2_error:
-    self.Emit("SUBQ" , jit.Imm(2), _IC)                                 // SUBQ $2, IC
-    self.Sjmp("JMP"  , _LB_char_0_error)                                // JMP _char_0_error
-    self.Link(_LB_im_error)                                             // _im_error:
-    self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0))                    // CMPB CX, (IP)(IC)
-    self.Sjmp("JNE"  , _LB_char_0_error)                                // JNE _char_0_error
-    self.Emit("SHRL" , jit.Imm(8), _CX)                                 // SHRL $8, CX
-    self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1))                    // CMPB CX, 1(IP)(IC)
-    self.Sjmp("JNE"  , _LB_char_1_error)                                // JNE _char_1_error
-    self.Emit("SHRL" , jit.Imm(8), _CX)                                 // SHRL $8, CX
-    self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2))                    // CMPB CX, 2(IP)(IC)
-    self.Sjmp("JNE"  , _LB_char_2_error)                                // JNE _char_2_error
-    self.Sjmp("JMP"  , _LB_char_3_error)                                // JMP _char_3_error
-    self.Link(_LB_char_4_error)                                         // _char_4_error:
-    self.Emit("ADDQ" , jit.Imm(1), _IC)                                 // ADDQ $1, IC
-    self.Link(_LB_char_3_error)                                         // _char_3_error:
-    self.Emit("ADDQ" , jit.Imm(1), _IC)                                 // ADDQ $1, IC
-    self.Link(_LB_char_2_error)                                         // _char_2_error:
-    self.Emit("ADDQ" , jit.Imm(1), _IC)                                 // ADDQ $1, IC
-    self.Link(_LB_char_1_error)                                         // _char_1_error:
-    self.Emit("ADDQ" , jit.Imm(1), _IC)                                 // ADDQ $1, IC
-    self.Link(_LB_char_0_error)                                         // _char_0_error:
-    self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP)     // MOVL ${types.ERR_INVALID_CHAR}, EP
-    self.Link(_LB_parsing_error)                                        // _parsing_error:
-    self.Emit("MOVOU", _ARG_s, _X0)                                     // MOVOU s, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0))                            // MOVOU X0, (SP)
-    self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16))                           // MOVQ IC, 16(SP)
-    self.Emit("MOVQ" , _EP, jit.Ptr(_SP, 24))                           // MOVQ EP, 24(SP)
-    self.call_go(_F_error_wrap)                                         // CALL_GO error_wrap
-    self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET)                           // MOVQ 32(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP)                           // MOVQ 40(SP), EP
-    self.Sjmp("JMP"  , _LB_error)                                       // JMP _error
+	self.Link(_LB_eof_error)                             // _eof_error:
+	self.Emit("MOVQ", _IL, _IC)                          // MOVQ IL, IC
+	self.Emit("MOVL", jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP
+	self.Sjmp("JMP", _LB_parsing_error)                  // JMP _parsing_error
+	self.Link(_LB_unquote_error)                         // _unquote_error:
+	self.Emit("SUBQ", _VAR_sr, _SI)                      // SUBQ sr, SI
+	self.Emit("SUBQ", _SI, _IC)                          // SUBQ SI, IC
+	self.Link(_LB_parsing_error_v)                       // _parsing_error_v:
+	self.Emit("MOVQ", _AX, _EP)                          // MOVQ AX, EP
+	self.Emit("NEGQ", _EP)                               // NEGQ EP
+	self.Sjmp("JMP", _LB_parsing_error)                  // JMP _parsing_error
+	self.Link(_LB_char_m3_error)                         // _char_m3_error:
+	self.Emit("SUBQ", jit.Imm(1), _IC)                   // SUBQ $1, IC
+	self.Link(_LB_char_m2_error)                         // _char_m2_error:
+	self.Emit("SUBQ", jit.Imm(2), _IC)                   // SUBQ $2, IC
+	self.Sjmp("JMP", _LB_char_0_error)                   // JMP _char_0_error
+	self.Link(_LB_im_error)                              // _im_error:
+	self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 0))      // CMPB CX, (IP)(IC)
+	self.Sjmp("JNE", _LB_char_0_error)                   // JNE _char_0_error
+	self.Emit("SHRL", jit.Imm(8), _CX)                   // SHRL $8, CX
+	self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 1))      // CMPB CX, 1(IP)(IC)
+	self.Sjmp("JNE", _LB_char_1_error)                   // JNE _char_1_error
+	self.Emit("SHRL", jit.Imm(8), _CX)                   // SHRL $8, CX
+	self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 2))      // CMPB CX, 2(IP)(IC)
+	self.Sjmp("JNE", _LB_char_2_error)                   // JNE _char_2_error
+	self.Sjmp("JMP", _LB_char_3_error)                   // JMP _char_3_error
+	self.Link(_LB_char_4_error)                          // _char_4_error:
+	self.Emit("ADDQ", jit.Imm(1), _IC)                   // ADDQ $1, IC
+	self.Link(_LB_char_3_error)                          // _char_3_error:
+	self.Emit("ADDQ", jit.Imm(1), _IC)                   // ADDQ $1, IC
+	self.Link(_LB_char_2_error)                          // _char_2_error:
+	self.Emit("ADDQ", jit.Imm(1), _IC)                   // ADDQ $1, IC
+	self.Link(_LB_char_1_error)                          // _char_1_error:
+	self.Emit("ADDQ", jit.Imm(1), _IC)                   // ADDQ $1, IC
+	self.Link(_LB_char_0_error)                          // _char_0_error:
+	self.Emit("MOVL", jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP
+	self.Link(_LB_parsing_error)                         // _parsing_error:
+	self.Emit("MOVOU", _ARG_s, _X0)                      // MOVOU s, X0
+	self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0))             // MOVOU X0, (SP)
+	self.Emit("MOVQ", _IC, jit.Ptr(_SP, 16))             // MOVQ IC, 16(SP)
+	self.Emit("MOVQ", _EP, jit.Ptr(_SP, 24))             // MOVQ EP, 24(SP)
+	self.call_go(_F_error_wrap)                          // CALL_GO error_wrap
+	self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET)             // MOVQ 32(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP)             // MOVQ 40(SP), EP
+	self.Sjmp("JMP", _LB_error)                          // JMP _error
 }

 /** Memory Management Routines **/

 var (
- _T_byte = jit.Type(byteType) - _F_mallocgc = jit.Func(mallocgc) + _T_byte = jit.Type(byteType) + _F_mallocgc = jit.Func(mallocgc) ) func (self *_Assembler) malloc(nb obj.Addr, ret obj.Addr) { - self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("MOVQ", _T_byte, _CX) // MOVQ ${type(byte)}, CX - self.Emit("MOVQ", nb, jit.Ptr(_SP, 0)) // MOVQ ${nb}, (SP) - self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) - self.call_go(_F_mallocgc) // CALL_GO mallocgc - self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _T_byte, _CX) // MOVQ ${type(byte)}, CX + self.Emit("MOVQ", nb, jit.Ptr(_SP, 0)) // MOVQ ${nb}, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} } func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) { - self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ ${vt}, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - self.Emit("MOVB", jit.Imm(1), jit.Ptr(_SP, 16)) // MOVB $1, 16(SP) - self.call_go(_F_mallocgc) // CALL_GO mallocgc - self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ ${vt}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVB", jit.Imm(1), jit.Ptr(_SP, 16)) // MOVB $1, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} } func (self *_Assembler) vfollow(vt reflect.Type) { - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} - self.valloc(vt, _AX) // VALLOC ${vt}, AX - self.WritePtrAX(1, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) - self.Link("_end_{n}") // _end_{n}: - self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP + self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_end_{n}") // JNZ _end_{n} + self.valloc(vt, _AX) // VALLOC ${vt}, AX + self.WritePtrAX(1, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP } /** Value Parsing Routines **/ var ( - _F_vstring = jit.Imm(int64(native.S_vstring)) - _F_vnumber = jit.Imm(int64(native.S_vnumber)) - _F_vsigned = jit.Imm(int64(native.S_vsigned)) - _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) + _F_vstring = jit.Imm(int64(native.S_vstring)) + _F_vnumber = jit.Imm(int64(native.S_vnumber)) + _F_vsigned = jit.Imm(int64(native.S_vsigned)) + _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) ) func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX - self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} - // try to skip the value - if vt != nil { - self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v - self.Emit("MOVQ", jit.Type(vt), _ET) - self.Emit("MOVQ", _ET, _VAR_et) - if pin2 != -1 { - self.Emit("SUBQ", jit.Imm(1), _BP) - self.Emit("MOVQ", 
_BP, _VAR_ic) - self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 - self.Xref(pin2, 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_key_value) - } else { - self.Emit("MOVQ", _BP, _VAR_ic) - self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref(pin, 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_one) - } - self.Link("_check_err_{n}") - } else { - self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v - } + self.Emit("MOVQ", _VAR_st_Vt, _AX) // MOVQ st.Vt, AX + self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} + // try to skip the value + if vt != nil { + self.Sjmp("JNS", "_check_err_{n}") // JNE _parsing_error_v + self.Emit("MOVQ", jit.Type(vt), _ET) + self.Emit("MOVQ", _ET, _VAR_et) + if pin2 != -1 { + self.Emit("SUBQ", jit.Imm(1), _BP) + self.Emit("MOVQ", _BP, _VAR_ic) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(pin2, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_key_value) + } else { + self.Emit("MOVQ", _BP, _VAR_ic) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref(pin, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_one) + } + self.Link("_check_err_{n}") + } else { + self.Sjmp("JS", _LB_parsing_error_v) // JNE _parsing_error_v + } } func (self *_Assembler) check_eof(d int64) { - if d == 1 { - self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error - } else { - self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX - self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , _LB_eof_error) // JA _eof_error - } + if d == 1 { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error + } else { + self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", _LB_eof_error) // JA _eof_error + } } -func (self *_Assembler) parse_string() { // parse_string has a validate flag params in the last - self.Emit("MOVQ", _ARG_fv, _CX) - self.call_vf(_F_vstring) - self.check_err(nil, "", -1) +func (self *_Assembler) parse_string() { // parse_string has a validate flag params in the last + self.Emit("MOVQ", _ARG_fv, _CX) + self.call_vf(_F_vstring) + self.check_err(nil, "", -1) } func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BP) - self.call_vf(_F_vnumber) // call vnumber - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vnumber) // call vnumber + self.check_err(vt, pin, pin2) } func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BP) - self.call_vf(_F_vsigned) - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vsigned) + self.check_err(vt, pin, pin2) } func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BP) - self.call_vf(_F_vunsigned) - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vunsigned) + self.check_err(vt, pin, pin2) } -// Pointer: DI, Size: SI, Return: R9 +// Pointer: DI, Size: SI, Return: R9 func (self *_Assembler) copy_string() { - self.Link("_copy_string") - self.Emit("MOVQ", _DI, _VAR_bs_p) - self.Emit("MOVQ", _SI, _VAR_bs_n) - self.Emit("MOVQ", _R9, _VAR_bs_LR) - self.malloc(_SI, _AX) - self.Emit("MOVQ", _AX, _VAR_sv_p) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) - self.Emit("MOVQ", _VAR_bs_p, _DI) - self.Emit("MOVQ", _DI, jit.Ptr(_SP, 8)) - self.Emit("MOVQ", _VAR_bs_n, _SI) - 
self.Emit("MOVQ", _SI, jit.Ptr(_SP, 16)) - self.call_go(_F_memmove) - self.Emit("MOVQ", _VAR_sv_p, _DI) - self.Emit("MOVQ", _VAR_bs_n, _SI) - self.Emit("MOVQ", _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_copy_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc(_SI, _AX) + self.Emit("MOVQ", _AX, _VAR_sv_p) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _DI, jit.Ptr(_SP, 8)) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 16)) + self.call_go(_F_memmove) + self.Emit("MOVQ", _VAR_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } // Pointer: DI, Size: SI, Return: R9 func (self *_Assembler) escape_string() { - self.Link("_escape_string") - self.Emit("MOVQ" , _DI, _VAR_bs_p) - self.Emit("MOVQ" , _SI, _VAR_bs_n) - self.Emit("MOVQ" , _R9, _VAR_bs_LR) - self.malloc(_SI, _DX) // MALLOC SI, DX - self.Emit("MOVQ" , _DX, _VAR_sv_p) - self.Emit("MOVQ" , _VAR_bs_p, _DI) - self.Emit("MOVQ" , _VAR_bs_n, _SI) - self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX - self.Emit("XORL" , _R8, _R8) // XORL R8, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv - self.Emit("SETCC", _R8) // SETCC R8 - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 - self.call(_F_unquote) // CALL unquote - self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI - self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error - self.Emit("MOVQ" , _AX, _SI) - self.Emit("MOVQ" , _VAR_sv_p, _DI) - self.Emit("MOVQ" , _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_escape_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ", _DX, _VAR_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("LEAQ", _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + self.call(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ", jit.Imm(1), _SI) // ADDQ $1, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ", _AX, _SI) + self.Emit("MOVQ", _VAR_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } func (self *_Assembler) escape_string_twice() { - self.Link("_escape_string_twice") - self.Emit("MOVQ" , _DI, _VAR_bs_p) - self.Emit("MOVQ" , _SI, _VAR_bs_n) - self.Emit("MOVQ" , _R9, _VAR_bs_LR) - self.malloc(_SI, _DX) // MALLOC SI, DX - self.Emit("MOVQ" , _DX, _VAR_sv_p) - self.Emit("MOVQ" , _VAR_bs_p, _DI) - self.Emit("MOVQ" , _VAR_bs_n, _SI) - self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX - self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("SETCC", _AX) // SETCC AX - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ 
${types.B_UNICODE_REPLACE}, AX - self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8 - self.call(_F_unquote) // CALL unquote - self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI - self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error - self.Emit("MOVQ" , _AX, _SI) - self.Emit("MOVQ" , _VAR_sv_p, _DI) - self.Emit("MOVQ" , _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_escape_string_twice") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ", _DX, _VAR_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("LEAQ", _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("SETCC", _AX) // SETCC AX + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX + self.Emit("ORQ", _AX, _R8) // ORQ AX, R8 + self.call(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ", jit.Imm(3), _SI) // ADDQ $3, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ", _AX, _SI) + self.Emit("MOVQ", _VAR_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } /** Range Checking Routines **/ var ( - _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) - _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) + _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) + _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) ) var ( - _Vp_max_f32 = new(float64) - _Vp_min_f32 = new(float64) + _Vp_max_f32 = new(float64) + _Vp_min_f32 = new(float64) ) func init() { - *_Vp_max_f32 = math.MaxFloat32 - *_Vp_min_f32 = -math.MaxFloat32 + *_Vp_max_f32 = math.MaxFloat32 + *_Vp_min_f32 = -math.MaxFloat32 } func (self *_Assembler) range_single() { - self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 - self.Emit("MOVQ" , _V_max_f32, _AX) // MOVQ _max_f32, AX - self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET - self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP - self.Emit("UCOMISD" , jit.Ptr(_AX, 0), _X0) // UCOMISD (AX), X0 - self.Sjmp("JA" , _LB_range_error) // JA _range_error - self.Emit("MOVQ" , _V_min_f32, _AX) // MOVQ _min_f32, AX - self.Emit("MOVSD" , jit.Ptr(_AX, 0), _X1) // MOVSD (AX), X1 - self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1 - self.Sjmp("JA" , _LB_range_error) // JA _range_error - self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 + self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVQ", _V_max_f32, _AX) // MOVQ _max_f32, AX + self.Emit("MOVQ", jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET + self.Emit("MOVQ", jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP + self.Emit("UCOMISD", jit.Ptr(_AX, 0), _X0) // UCOMISD (AX), X0 + self.Sjmp("JA", _LB_range_error) // JA _range_error + self.Emit("MOVQ", _V_min_f32, _AX) // MOVQ _min_f32, AX + self.Emit("MOVSD", jit.Ptr(_AX, 0), _X1) // MOVSD (AX), X1 + self.Emit("UCOMISD", _X0, _X1) // UCOMISD X0, X1 + self.Sjmp("JA", _LB_range_error) // JA _range_error + self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 } 
 func (self *_Assembler) range_signed(i *rt.GoItab, t *rt.GoType, a int64, b int64) {
-    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
-    self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET
-    self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP
-    self.Emit("CMPQ", _AX, jit.Imm(a)) // CMPQ AX, ${a}
-    self.Sjmp("JL" , _LB_range_error) // JL _range_error
-    self.Emit("CMPQ", _AX, jit.Imm(b)) // CMPQ AX, ${B}
-    self.Sjmp("JG" , _LB_range_error) // JG _range_error
+    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+    self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET
+    self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP
+    self.Emit("CMPQ", _AX, jit.Imm(a)) // CMPQ AX, ${a}
+    self.Sjmp("JL", _LB_range_error) // JL _range_error
+    self.Emit("CMPQ", _AX, jit.Imm(b)) // CMPQ AX, ${B}
+    self.Sjmp("JG", _LB_range_error) // JG _range_error
 }

 func (self *_Assembler) range_unsigned(i *rt.GoItab, t *rt.GoType, v uint64) {
-    self.Emit("MOVQ" , _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
-    self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET
-    self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JS" , _LB_range_error) // JS _range_error
-    self.Emit("CMPQ" , _AX, jit.Imm(int64(v))) // CMPQ AX, ${a}
-    self.Sjmp("JA" , _LB_range_error) // JA _range_error
+    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+    self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET
+    self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JS", _LB_range_error) // JS _range_error
+    self.Emit("CMPQ", _AX, jit.Imm(int64(v))) // CMPQ AX, ${a}
+    self.Sjmp("JA", _LB_range_error) // JA _range_error
 }

 /** String Manipulating Routines **/

 var (
-    _F_unquote = jit.Imm(int64(native.S_unquote))
+    _F_unquote = jit.Imm(int64(native.S_unquote))
 )

 func (self *_Assembler) slice_from(p obj.Addr, d int64) {
-    self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI
-    self.slice_from_r(_SI, d) // SLICE_R SI, ${d}
+    self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI
+    self.slice_from_r(_SI, d) // SLICE_R SI, ${d}
 }

 func (self *_Assembler) slice_from_r(p obj.Addr, d int64) {
-    self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI
-    self.Emit("NEGQ", p) // NEGQ ${p}
-    self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI
+    self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI
+    self.Emit("NEGQ", p) // NEGQ ${p}
+    self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI
 }

 func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) {
-    self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
-    self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
-    self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n}
-    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-    self.Sref("_unquote_once_write_{n}", 4)
-    self.Sjmp("JMP" , "_escape_string")
-    self.Link("_noescape_{n}") // _noescape_{n}:
-    if copy {
-        self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
-        self.Sjmp("JNC", "_unquote_once_write_{n}")
-        self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-        self.Sref("_unquote_once_write_{n}", 4)
-        self.Sjmp("JMP", "_copy_string")
-    }
-    self.Link("_unquote_once_write_{n}")
-    self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n}
-    if stack {
-        self.Emit("MOVQ", _DI, p)
-    } else {
-        self.WriteRecNotAX(10, _DI, p, false, false)
-    }
+    self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+    self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+    self.Sjmp("JE", "_noescape_{n}") // JE _noescape_{n}
+    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+    self.Sref("_unquote_once_write_{n}", 4)
+    self.Sjmp("JMP", "_escape_string")
+    self.Link("_noescape_{n}") // _noescape_{n}:
+    if copy {
+        self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
+        self.Sjmp("JNC", "_unquote_once_write_{n}")
+        self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+        self.Sref("_unquote_once_write_{n}", 4)
+        self.Sjmp("JMP", "_copy_string")
+    }
+    self.Link("_unquote_once_write_{n}")
+    self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n}
+    if stack {
+        self.Emit("MOVQ", _DI, p)
+    } else {
+        self.WriteRecNotAX(10, _DI, p, false, false)
+    }
 }

 func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) {
-    self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
-    self.Sjmp("JE" , _LB_eof_error) // JE _eof_error
-    self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\'
-    self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error
-    self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"'
-    self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error
-    self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3
-    self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX
-    self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX
-    self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX
-    self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n}
-    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-    self.Sref("_unquote_twice_write_{n}", 4)
-    self.Sjmp("JMP" , "_escape_string_twice")
-    self.Link("_noescape_{n}") // _noescape_{n}:
-    self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
-    self.Sjmp("JNC", "_unquote_twice_write_{n}")
-    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-    self.Sref("_unquote_twice_write_{n}", 4)
-    self.Sjmp("JMP", "_copy_string")
-    self.Link("_unquote_twice_write_{n}")
-    self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n}
-    if stack {
-        self.Emit("MOVQ", _DI, p)
-    } else {
-        self.WriteRecNotAX(12, _DI, p, false, false)
-    }
+    self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+    self.Sjmp("JE", _LB_eof_error) // JE _eof_error
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\'
+    self.Sjmp("JNE", _LB_char_m3_error) // JNE _char_m3_error
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"'
+    self.Sjmp("JNE", _LB_char_m2_error) // JNE _char_m2_error
+    self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3
+    self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX
+    self.Emit("ADDQ", _VAR_st_Iv, _AX) // ADDQ st.Iv, AX
+    self.Emit("CMPQ", _VAR_st_Ep, _AX) // CMPQ st.Ep, AX
+    self.Sjmp("JE", "_noescape_{n}") // JE _noescape_{n}
+    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+    self.Sref("_unquote_twice_write_{n}", 4)
+    self.Sjmp("JMP", "_escape_string_twice")
+    self.Link("_noescape_{n}") // _noescape_{n}:
+    self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
+    self.Sjmp("JNC", "_unquote_twice_write_{n}")
+    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+    self.Sref("_unquote_twice_write_{n}", 4)
+    self.Sjmp("JMP", "_copy_string")
+    self.Link("_unquote_twice_write_{n}")
+    self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n}
+    if stack {
+        self.Emit("MOVQ", _DI, p)
+    } else {
+        self.WriteRecNotAX(12, _DI, p, false, false)
+    }
 }

 /** Memory Clearing Routines **/

 var (
-    _F_memclrHasPointers = jit.Func(memclrHasPointers)
-    _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers)
+    _F_memclrHasPointers = jit.Func(memclrHasPointers)
+    _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers)
 )

 func (self *_Assembler) mem_clear_fn(ptrfree bool) {
-    if !ptrfree {
-        self.call_go(_F_memclrHasPointers)
-    } else {
-        self.call_go(_F_memclrNoHeapPointers)
-    }
+    if !ptrfree {
+        self.call_go(_F_memclrHasPointers)
+    } else {
+        self.call_go(_F_memclrNoHeapPointers)
+    }
 }

 func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) {
-    self.Emit("MOVQ", jit.Imm(size), _CX) // MOVQ ${size}, CX
-    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX
-    self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX
-    self.Emit("ADDQ", _AX, _CX) // ADDQ AX, CX
-    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP)
-    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
-    self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers
+    self.Emit("MOVQ", jit.Imm(size), _CX) // MOVQ ${size}, CX
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX
+    self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX
+    self.Emit("ADDQ", _AX, _CX) // ADDQ AX, CX
+    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP)
+    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+    self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers
 }

 /** Map Assigning Routines **/

 var (
-    _F_mapassign = jit.Func(mapassign)
-    _F_mapassign_fast32 = jit.Func(mapassign_fast32)
-    _F_mapassign_faststr = jit.Func(mapassign_faststr)
-    _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr)
+    _F_mapassign = jit.Func(mapassign)
+    _F_mapassign_fast32 = jit.Func(mapassign_fast32)
+    _F_mapassign_faststr = jit.Func(mapassign_faststr)
+    _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr)
 )

 var (
-    _F_decodeJsonUnmarshaler obj.Addr
-    _F_decodeTextUnmarshaler obj.Addr
+    _F_decodeJsonUnmarshaler obj.Addr
+    _F_decodeTextUnmarshaler obj.Addr
 )

 func init() {
-    _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler)
-    _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler)
+    _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler)
+    _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler)
 }

 func (self *_Assembler) mapaccess_ptr(t reflect.Type) {
-    if rt.MapType(rt.UnpackType(t)).IndirectElem() {
-        self.vfollow(t.Elem())
-    }
+    if rt.MapType(rt.UnpackType(t)).IndirectElem() {
+        self.vfollow(t.Elem())
+    }
 }

 func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) {
-    self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX
-    self.mapassign_call(t, _F_mapassign) // MAPASSIGN ${t}, mapassign
+    self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX
+    self.mapassign_call(t, _F_mapassign) // MAPASSIGN ${t}, mapassign
 }

 func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) {
-    self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
-    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
-    self.Emit("MOVQ", p, jit.Ptr(_SP, 16)) // MOVQ ${p}, 16(SP)
-    self.Emit("MOVQ", n, jit.Ptr(_SP, 24)) // MOVQ ${n}, 24(SP)
-    self.call_go(_F_mapassign_faststr) // CALL_GO ${fn}
-    self.Emit("MOVQ", jit.Ptr(_SP, 32), _VP) // MOVQ 32(SP), VP
-    self.mapaccess_ptr(t)
+    self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+    self.Emit("MOVQ", p, jit.Ptr(_SP, 16)) // MOVQ ${p}, 16(SP)
+    self.Emit("MOVQ", n, jit.Ptr(_SP, 24)) // MOVQ ${n}, 24(SP)
+    self.call_go(_F_mapassign_faststr) // CALL_GO ${fn}
+    self.Emit("MOVQ", jit.Ptr(_SP, 32), _VP) // MOVQ 32(SP), VP
+    self.mapaccess_ptr(t)
 }

 func (self *_Assembler) mapassign_call(t reflect.Type, fn obj.Addr) {
-    self.Emit("MOVQ", jit.Type(t), _SI) // MOVQ ${t}, SI
-    self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP)
-    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
-    self.call_go(fn) // CALL_GO ${fn}
-    self.Emit("MOVQ", jit.Ptr(_SP, 24), _VP) // MOVQ 24(SP), VP
+    self.Emit("MOVQ", jit.Type(t), _SI) // MOVQ ${t}, SI
+    self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP)
+    self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+    self.call_go(fn) // CALL_GO ${fn}
+    self.Emit("MOVQ", jit.Ptr(_SP, 24), _VP) // MOVQ 24(SP), VP
 }

 func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) {
-    self.mapassign_call(t, fn)
-    self.mapaccess_ptr(t)
+    self.mapassign_call(t, fn)
+    self.mapaccess_ptr(t)
 }

 func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) {
-    pv := false
-    vk := t.Key()
-    tk := t.Key()
-
-    /* deref pointer if needed */
-    if vk.Kind() == reflect.Ptr {
-        pv = true
-        vk = vk.Elem()
-    }
-
-    /* addressable value with pointer receiver */
-    if addressable {
-        pv = false
-        tk = reflect.PtrTo(tk)
-    }
-
-    /* allocate the key, and call the unmarshaler */
-    self.valloc(vk, _DI) // VALLOC ${vk}, DI
-    // must spill vk pointer since next call_go may invoke GC
-    self.Emit("MOVQ" , _DI, _VAR_vk)
-    self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
-    self.Emit("MOVQ" , _DI, jit.Ptr(_SP, 8)) // MOVQ DI, 8(SP)
-    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
-    self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler
-    self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
-    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
-    self.Sjmp("JNZ" , _LB_error) // JNZ _error
-    self.Emit("MOVQ" , _VAR_vk, _AX)
-
-    /* select the correct assignment function */
-    if !pv {
-        self.mapassign_call(t, _F_mapassign)
-    } else {
-        self.mapassign_fastx(t, _F_mapassign_fast64ptr)
-    }
+    pv := false
+    vk := t.Key()
+    tk := t.Key()
+
+    /* deref pointer if needed */
+    if vk.Kind() == reflect.Ptr {
+        pv = true
+        vk = vk.Elem()
+    }
+
+    /* addressable value with pointer receiver */
+    if addressable {
+        pv = false
+        tk = reflect.PtrTo(tk)
+    }
+
+    /* allocate the key, and call the unmarshaler */
+    self.valloc(vk, _DI) // VALLOC ${vk}, DI
+    // must spill vk pointer since next call_go may invoke GC
+    self.Emit("MOVQ", _DI, _VAR_vk)
+    self.Emit("MOVQ", jit.Type(tk), _AX) // MOVQ ${tk}, AX
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+    self.Emit("MOVQ", _DI, jit.Ptr(_SP, 8)) // MOVQ DI, 8(SP)
+    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
+    self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler
+    self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+    self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+    self.Sjmp("JNZ", _LB_error) // JNZ _error
+    self.Emit("MOVQ", _VAR_vk, _AX)
+
+    /* select the correct assignment function */
+    if !pv {
+        self.mapassign_call(t, _F_mapassign)
+    } else {
+        self.mapassign_fastx(t, _F_mapassign_fast64ptr)
+    }
 }

 /** External Unmarshaler Routines **/

 var (
-    _F_skip_one = jit.Imm(int64(native.S_skip_one))
-    _F_skip_number = jit.Imm(int64(native.S_skip_number))
+    _F_skip_one = jit.Imm(int64(native.S_skip_one))
+    _F_skip_number = jit.Imm(int64(native.S_skip_number))
 )

 func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) {
-    self.call_sf(_F_skip_one) // CALL_SF skip_one
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
-    self.slice_from_r(_AX, 0) // SLICE_R AX, $0
-    self.Emit("MOVQ" , _DI, _VAR_sv_p) // MOVQ DI, sv.p
-    self.Emit("MOVQ" , _SI, _VAR_sv_n) // MOVQ SI, sv.n
-    self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref}
+    self.call_sf(_F_skip_one) // CALL_SF skip_one
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v
+    self.slice_from_r(_AX, 0) // SLICE_R AX, $0
+    self.Emit("MOVQ", _DI, _VAR_sv_p) // MOVQ DI, sv.p
+    self.Emit("MOVQ", _SI, _VAR_sv_n) // MOVQ SI, sv.n
+    self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref}
 }

 func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) {
-    self.parse_string() // PARSE STRING
-    self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
-    self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref}
+    self.parse_string() // PARSE STRING
+    self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+    self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref}
 }

 func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) {
-    pt := t
-    vk := t.Kind()
-
-    /* allocate the field if needed */
-    if deref && vk == reflect.Ptr {
-        self.Emit("MOVQ" , _VP, _AX) // MOVQ VP, AX
-        self.Emit("MOVQ" , jit.Ptr(_AX, 0), _AX) // MOVQ (AX), AX
-        self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-        self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n}
-        self.valloc(t.Elem(), _AX) // VALLOC ${t.Elem()}, AX
-        self.WritePtrAX(3, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
-        self.Link("_deref_{n}") // _deref_{n}:
-    }
-
-    /* set value type */
-    self.Emit("MOVQ", jit.Type(pt), _CX) // MOVQ ${pt}, CX
-    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 0)) // MOVQ CX, (SP)
-
-    /* set value pointer */
-    if deref && vk == reflect.Ptr {
-        self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
-    } else {
-        self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
-    }
-
-    /* set the source string and call the unmarshaler */
-    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
-    self.call_go(fn) // CALL_GO ${fn}
-    self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
-    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
-    self.Sjmp("JNZ" , _LB_error) // JNZ _error
+    pt := t
+    vk := t.Kind()
+
+    /* allocate the field if needed */
+    if deref && vk == reflect.Ptr {
+        self.Emit("MOVQ", _VP, _AX) // MOVQ VP, AX
+        self.Emit("MOVQ", jit.Ptr(_AX, 0), _AX) // MOVQ (AX), AX
+        self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+        self.Sjmp("JNZ", "_deref_{n}") // JNZ _deref_{n}
+        self.valloc(t.Elem(), _AX) // VALLOC ${t.Elem()}, AX
+        self.WritePtrAX(3, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+        self.Link("_deref_{n}") // _deref_{n}:
+    }
+
+    /* set value type */
+    self.Emit("MOVQ", jit.Type(pt), _CX) // MOVQ ${pt}, CX
+    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 0)) // MOVQ CX, (SP)
+
+    /* set value pointer */
+    if deref && vk == reflect.Ptr {
+        self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+    } else {
+        self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+    }
+
+    /* set the source string and call the unmarshaler */
+    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
+    self.call_go(fn) // CALL_GO ${fn}
+    self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+    self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+    self.Sjmp("JNZ", _LB_error) // JNZ _error
 }

 /** Dynamic Decoding Routine **/

 var (
-    _F_decodeTypedPointer obj.Addr
+    _F_decodeTypedPointer obj.Addr
 )

 func init() {
-    _F_decodeTypedPointer = jit.Func(decodeTypedPointer)
+    _F_decodeTypedPointer = jit.Func(decodeTypedPointer)
 }

 func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
-    self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX
-    self.Emit("MOVOU", _ARG_sp, _X0) // MOVOU sp, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
-    self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP)
-    self.Emit("MOVQ" , vt, jit.Ptr(_SP, 24)) // MOVQ ${vt}, 24(SP)
-    self.Emit("MOVQ" , vp, jit.Ptr(_SP, 32)) // MOVQ ${vp}, 32(SP)
-    self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 40)) // MOVQ ST, 40(SP)
-    self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 48)) // MOVQ CX, 48(SP)
-    self.call_go(_F_decodeTypedPointer) // CALL_GO decodeTypedPointer
-    self.Emit("MOVQ" , jit.Ptr(_SP, 64), _ET) // MOVQ 64(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 72), _EP) // MOVQ 72(SP), EP
-    self.Emit("MOVQ" , jit.Ptr(_SP, 56), _IC) // MOVQ 56(SP), IC
-    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
-    self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n}
-    self.Emit("MOVQ", _I_json_MismatchTypeError, _AX) // MOVQ _I_json_MismatchTypeError, AX
-    self.Emit("CMPQ", _ET, _AX) // CMPQ ET, AX
-    self.Sjmp("JNE" , _LB_error) // JNE LB_error
-    self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic
-    self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et
-    self.Link("_decode_dynamic_end_{n}")
-
+    self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ fv, CX
+    self.Emit("MOVOU", _ARG_sp, _X0) // MOVOU sp, X0
+    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
+    self.Emit("MOVQ", _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP)
+    self.Emit("MOVQ", vt, jit.Ptr(_SP, 24)) // MOVQ ${vt}, 24(SP)
+    self.Emit("MOVQ", vp, jit.Ptr(_SP, 32)) // MOVQ ${vp}, 32(SP)
+    self.Emit("MOVQ", _ST, jit.Ptr(_SP, 40)) // MOVQ ST, 40(SP)
+    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 48)) // MOVQ CX, 48(SP)
+    self.call_go(_F_decodeTypedPointer) // CALL_GO decodeTypedPointer
+    self.Emit("MOVQ", jit.Ptr(_SP, 64), _ET) // MOVQ 64(SP), ET
+    self.Emit("MOVQ", jit.Ptr(_SP, 72), _EP) // MOVQ 72(SP), EP
+    self.Emit("MOVQ", jit.Ptr(_SP, 56), _IC) // MOVQ 56(SP), IC
+    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+    self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n}
+    self.Emit("MOVQ", _I_json_MismatchTypeError, _AX) // MOVQ _I_json_MismatchTypeError, AX
+    self.Emit("CMPQ", _ET, _AX) // CMPQ ET, AX
+    self.Sjmp("JNE", _LB_error) // JNE LB_error
+    self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic
+    self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et
+    self.Link("_decode_dynamic_end_{n}")
+
 }

 /** OpCode Assembler Functions **/

 var (
-    _F_memequal = jit.Func(memequal)
-    _F_memmove = jit.Func(memmove)
-    _F_growslice = jit.Func(growslice)
-    _F_makeslice = jit.Func(makeslice)
-    _F_makemap_small = jit.Func(makemap_small)
-    _F_mapassign_fast64 = jit.Func(mapassign_fast64)
+    _F_memequal = jit.Func(memequal)
+    _F_memmove = jit.Func(memmove)
+    _F_growslice = jit.Func(growslice)
+    _F_makeslice = jit.Func(makeslice)
+    _F_makemap_small = jit.Func(makemap_small)
+    _F_mapassign_fast64 = jit.Func(mapassign_fast64)
 )

 var (
-    _F_lspace = jit.Imm(int64(native.S_lspace))
-    _F_strhash = jit.Imm(int64(caching.S_strhash))
+    _F_lspace = jit.Imm(int64(native.S_lspace))
+    _F_strhash = jit.Imm(int64(caching.S_strhash))
 )

 var (
-    _F_b64decode = jit.Imm(int64(_subr__b64decode))
-    _F_decodeValue = jit.Imm(int64(_subr_decode_value))
+    _F_b64decode = jit.Imm(int64(_subr__b64decode))
+    _F_decodeValue = jit.Imm(int64(_subr_decode_value))
 )

 var (
-    _F_skip_array = jit.Imm(int64(native.S_skip_array))
-    _F_skip_object = jit.Imm(int64(native.S_skip_object))
+    _F_skip_array = jit.Imm(int64(native.S_skip_array))
+    _F_skip_object = jit.Imm(int64(native.S_skip_object))
 )

 var (
-    _F_FieldMap_GetCaseInsensitive obj.Addr
-    _Empty_Slice = make([]byte, 0)
-    _Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr))
+    _F_FieldMap_GetCaseInsensitive obj.Addr
+    _Empty_Slice = make([]byte, 0)
+    _Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr))
 )

 const (
-    _MODE_AVX2 = 1 << 2
+    _MODE_AVX2 = 1 << 2
 )

 const (
-    _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID))
-    _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name))
-    _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash))
+    _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID))
+    _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name))
+    _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash))
 )

 const (
-    _Vk_Ptr = int64(reflect.Ptr)
-    _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags))
+    _Vk_Ptr = int64(reflect.Ptr)
+    _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags))
 )

 func init() {
-    _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive)
+    _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive)
 }

 func (self *_Assembler) _asm_OP_any(_ *_Instr) {
-    self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX
-    self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX
-    self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n}
-    self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP
-    self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n}
-    self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
-    self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
-    self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
-    self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
-    self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n}
-    self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
-    self.decode_dynamic(_AX, _DI) // DECODE AX, DI
-    self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n}
-    self.Link("_decode_{n}") // _decode_{n}:
-    self.Emit("MOVQ" , _ARG_fv, _DF) // MOVQ fv, DF
-    self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP)
-    self.call(_F_decodeValue) // CALL decodeValue
-    self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP
-    self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error
-    self.Link("_decode_end_{n}") // _decode_end_{n}:
+    self.Emit("MOVQ", jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX
+    self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+    self.Sjmp("JZ", "_decode_{n}") // JZ _decode_{n}
+    self.Emit("CMPQ", _CX, _VP) // CMPQ CX, VP
+    self.Sjmp("JE", "_decode_{n}") // JE _decode_{n}
+    self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+    self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
+    self.Emit("ANDL", jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+    self.Emit("CMPL", _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+    self.Sjmp("JNE", "_decode_{n}") // JNE _decode_{n}
+    self.Emit("LEAQ", jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+    self.decode_dynamic(_AX, _DI) // DECODE AX, DI
+    self.Sjmp("JMP", "_decode_end_{n}") // JMP _decode_end_{n}
+    self.Link("_decode_{n}") // _decode_{n}:
+    self.Emit("MOVQ", _ARG_fv, _DF) // MOVQ fv, DF
+    self.Emit("MOVQ", _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP)
+    self.call(_F_decodeValue) // CALL decodeValue
+    self.Emit("TESTQ", _EP, _EP) // TESTQ EP, EP
+    self.Sjmp("JNZ", _LB_parsing_error) // JNZ _parsing_error
+    self.Link("_decode_end_{n}") // _decode_end_{n}:
 }

 func (self *_Assembler) _asm_OP_dyn(p *_Instr) {
-    self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET
-    self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0
-    self.Sjmp("JE" , _LB_type_error) // JE _type_error
-    self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
-    self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX
-    self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
-    self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
-    self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
-    self.Sjmp("JNE" , _LB_type_error) // JNE _type_error
-    self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
-    self.decode_dynamic(_AX, _DI) // DECODE AX, DI
-    self.Link("_decode_end_{n}") // _decode_end_{n}:
+    self.Emit("MOVQ", jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET
+    self.Emit("CMPQ", jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0
+    self.Sjmp("JE", _LB_type_error) // JE _type_error
+    self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+    self.Emit("MOVQ", jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX
+    self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
+    self.Emit("ANDL", jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+    self.Emit("CMPL", _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+    self.Sjmp("JNE", _LB_type_error) // JNE _type_error
+    self.Emit("LEAQ", jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+    self.decode_dynamic(_AX, _DI) // DECODE AX, DI
+    self.Link("_decode_end_{n}") // _decode_end_{n}:
 }

 func (self *_Assembler) _asm_OP_str(_ *_Instr) {
-    self.parse_string() // PARSE STRING
-    self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP)
+    self.parse_string() // PARSE STRING
+    self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP)
 }

 func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
-    self.parse_string() // PARSE STRING
-    self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
-    self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
-    self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
-    self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI
-    self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI
-    self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
-    self.malloc(_SI, _SI) // MALLOC SI, SI
+    self.parse_string() // PARSE STRING
+    self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+    self.Emit("MOVQ", _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
+    self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+    self.Emit("SHRQ", jit.Imm(2), _SI) // SHRQ $2, SI
+    self.Emit("LEAQ", jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI
+    self.Emit("MOVQ", _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
+    self.malloc(_SI, _SI) // MALLOC SI, SI

-    // TODO: due to base64x's bug, only use AVX mode now
-    self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX
+    // TODO: due to base64x's bug, only use AVX mode now
+    self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX

-    /* call the decoder */
-    self.Emit("XORL" , _DX, _DX) // XORL DX, DX
-    self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI
+    /* call the decoder */
+    self.Emit("XORL", _DX, _DX) // XORL DX, DX
+    self.Emit("MOVQ", _VP, _DI) // MOVQ VP, DI

-    self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R9) // MOVQ SI, (VP)
-    self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP)
-    self.Emit("MOVQ" , _R9, _SI)
+    self.Emit("MOVQ", jit.Ptr(_VP, 0), _R9) // MOVQ SI, (VP)
+    self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP)
+    self.Emit("MOVQ", _R9, _SI)

-    self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP)
-    self.call(_F_b64decode) // CALL b64decode
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JS" , _LB_base64_error) // JS _base64_error
-    self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+    self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP)
+    self.call(_F_b64decode) // CALL b64decode
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JS", _LB_base64_error) // JS _base64_error
+    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
 }

 func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
-    self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
-    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
-    self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f'
-    self.Sjmp("JE" , "_false_{n}") // JE _false_{n}
-    self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX
-    self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
-    self.Sjmp("JE" , "_bool_true_{n}")
-
-    // try to skip the value
-    self.Emit("MOVQ", _IC, _VAR_ic)
-    self.Emit("MOVQ", _T_bool, _ET)
-    self.Emit("MOVQ", _ET, _VAR_et)
-    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-    self.Sref("_end_{n}", 4)
-    self.Emit("MOVQ", _R9, _VAR_pc)
-    self.Sjmp("JMP" , _LB_skip_one)
-
-    self.Link("_bool_true_{n}")
-    self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
-    self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP)
-    self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
-    self.Link("_false_{n}") // _false_{n}:
-    self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX
-    self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
-    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
-    self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
-    self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX
-    self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
-    self.Sjmp("JNE" , _LB_im_error) // JNE _im_error
-    self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
-    self.Emit("XORL", _AX, _AX) // XORL AX, AX
-    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
-    self.Link("_end_{n}") // _end_{n}:
+    self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+    self.Sjmp("JA", _LB_eof_error) // JA _eof_error
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f'
+    self.Sjmp("JE", "_false_{n}") // JE _false_{n}
+    self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX
+    self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+    self.Sjmp("JE", "_bool_true_{n}")
+
+    // try to skip the value
+    self.Emit("MOVQ", _IC, _VAR_ic)
+    self.Emit("MOVQ", _T_bool, _ET)
+    self.Emit("MOVQ", _ET, _VAR_et)
+    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+    self.Sref("_end_{n}", 4)
+    self.Emit("MOVQ", _R9, _VAR_pc)
+    self.Sjmp("JMP", _LB_skip_one)
+
+    self.Link("_bool_true_{n}")
+    self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+    self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP)
+    self.Sjmp("JMP", "_end_{n}") // JMP _end_{n}
+    self.Link("_false_{n}") // _false_{n}:
+    self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX
+    self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+    self.Sjmp("JA", _LB_eof_error) // JA _eof_error
+    self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX
+    self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+    self.Sjmp("JNE", _LB_im_error) // JNE _im_error
+    self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+    self.Emit("XORL", _AX, _AX) // XORL AX, AX
+    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+    self.Link("_end_{n}") // _end_{n}:
 }

 func (self *_Assembler) _asm_OP_num(_ *_Instr) {
-    self.Emit("MOVQ", jit.Imm(0), _VAR_fl)
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
-    self.Emit("MOVQ", _IC, _BP)
-    self.Sjmp("JNE", "_skip_number_{n}")
-    self.Emit("MOVQ", jit.Imm(1), _VAR_fl)
-    self.Emit("ADDQ", jit.Imm(1), _IC)
-    self.Link("_skip_number_{n}")
-
-    /* call skip_number */
-    self.call_sf(_F_skip_number) // CALL_SF skip_one
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JNS" , "_num_next_{n}")
-
-    /* call skip one */
-    self.Emit("MOVQ", _BP, _VAR_ic)
-    self.Emit("MOVQ", _T_number, _ET)
-    self.Emit("MOVQ", _ET, _VAR_et)
-    self.Byte(0x4c, 0x8d, 0x0d)
-    self.Sref("_num_end_{n}", 4)
-    self.Emit("MOVQ", _R9, _VAR_pc)
-    self.Sjmp("JMP" , _LB_skip_one)
-
-    /* assgin string */
-    self.Link("_num_next_{n}")
-    self.slice_from_r(_AX, 0)
-    self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
-    self.Sjmp("JNC", "_num_write_{n}")
-    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
-    self.Sref("_num_write_{n}", 4)
-    self.Sjmp("JMP", "_copy_string")
-    self.Link("_num_write_{n}")
-    self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
-    self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false)
-
-    /* check if quoted */
-    self.Emit("CMPQ", _VAR_fl, jit.Imm(1))
-    self.Sjmp("JNE", "_num_end_{n}")
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
-    self.Sjmp("JNE", _LB_char_0_error)
-    self.Emit("ADDQ", jit.Imm(1), _IC)
-    self.Link("_num_end_{n}")
+    self.Emit("MOVQ", jit.Imm(0), _VAR_fl)
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+    self.Emit("MOVQ", _IC, _BP)
+    self.Sjmp("JNE", "_skip_number_{n}")
+    self.Emit("MOVQ", jit.Imm(1), _VAR_fl)
+    self.Emit("ADDQ", jit.Imm(1), _IC)
+    self.Link("_skip_number_{n}")
+
+    /* call skip_number */
+    self.call_sf(_F_skip_number) // CALL_SF skip_one
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JNS", "_num_next_{n}")
+
+    /* call skip one */
+    self.Emit("MOVQ", _BP, _VAR_ic)
+    self.Emit("MOVQ", _T_number, _ET)
+    self.Emit("MOVQ", _ET, _VAR_et)
+    self.Byte(0x4c, 0x8d, 0x0d)
+    self.Sref("_num_end_{n}", 4)
+    self.Emit("MOVQ", _R9, _VAR_pc)
+    self.Sjmp("JMP", _LB_skip_one)
+
+    /* assgin string */
+    self.Link("_num_next_{n}")
+    self.slice_from_r(_AX, 0)
+    self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
+    self.Sjmp("JNC", "_num_write_{n}")
+    self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+    self.Sref("_num_write_{n}", 4)
+    self.Sjmp("JMP", "_copy_string")
+    self.Link("_num_write_{n}")
+    self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+    self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false)
+
+    /* check if quoted */
+    self.Emit("CMPQ", _VAR_fl, jit.Imm(1))
+    self.Sjmp("JNE", "_num_end_{n}")
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+    self.Sjmp("JNE", _LB_char_0_error)
+    self.Emit("ADDQ", jit.Imm(1), _IC)
+    self.Link("_num_end_{n}")
 }

 func (self *_Assembler) _asm_OP_i8(ins *_Instr) {
-    var pin = "_i8_end_{n}"
-    self.parse_signed(int8Type, pin, -1) // PARSE int8
-    self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
-    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
-    self.Link(pin)
+    var pin = "_i8_end_{n}"
+    self.parse_signed(int8Type, pin, -1) // PARSE int8
+    self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_i16(ins *_Instr) {
-    var pin = "_i16_end_{n}"
-    self.parse_signed(int16Type, pin, -1) // PARSE int16
-    self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
-    self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
-    self.Link(pin)
+    var pin = "_i16_end_{n}"
+    self.parse_signed(int16Type, pin, -1) // PARSE int16
+    self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+    self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_i32(ins *_Instr) {
-    var pin = "_i32_end_{n}"
-    self.parse_signed(int32Type, pin, -1) // PARSE int32
-    self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
-    self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
-    self.Link(pin)
+    var pin = "_i32_end_{n}"
+    self.parse_signed(int32Type, pin, -1) // PARSE int32
+    self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+    self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_i64(ins *_Instr) {
-    var pin = "_i64_end_{n}"
-    self.parse_signed(int64Type, pin, -1) // PARSE int64
-    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
-    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
-    self.Link(pin)
+    var pin = "_i64_end_{n}"
+    self.parse_signed(int64Type, pin, -1) // PARSE int64
+    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_u8(ins *_Instr) {
-    var pin = "_u8_end_{n}"
-    self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8
-    self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
-    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
-    self.Link(pin)
+    var pin = "_u8_end_{n}"
+    self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8
+    self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+    self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_u16(ins *_Instr) {
-    var pin = "_u16_end_{n}"
-    self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16
-    self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
-    self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
-    self.Link(pin)
+    var pin = "_u16_end_{n}"
+    self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16
+    self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+    self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_u32(ins *_Instr) {
-    var pin = "_u32_end_{n}"
-    self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32
-    self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
-    self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
-    self.Link(pin)
+    var pin = "_u32_end_{n}"
+    self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32
+    self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
+    self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_u64(ins *_Instr) {
-    var pin = "_u64_end_{n}"
-    self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64
-    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
-    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
-    self.Link(pin)
+    var pin = "_u64_end_{n}"
+    self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64
+    self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_f32(ins *_Instr) {
-    var pin = "_f32_end_{n}"
-    self.parse_number(float32Type, pin, -1) // PARSE NUMBER
-    self.range_single() // RANGE float32
-    self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP)
-    self.Link(pin)
+    var pin = "_f32_end_{n}"
+    self.parse_number(float32Type, pin, -1) // PARSE NUMBER
+    self.range_single() // RANGE float32
+    self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_f64(ins *_Instr) {
-    var pin = "_f64_end_{n}"
-    self.parse_number(float64Type, pin, -1) // PARSE NUMBER
-    self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
-    self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP)
-    self.Link(pin)
+    var pin = "_f64_end_{n}"
+    self.parse_number(float64Type, pin, -1) // PARSE NUMBER
+    self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
+    self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP)
+    self.Link(pin)
 }

 func (self *_Assembler) _asm_OP_unquote(ins *_Instr) {
-    self.check_eof(2)
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\'
-    self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"'
-    self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error
-    self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC
-    self.parse_string() // PARSE STRING
-    self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP)
+    self.check_eof(2)
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\'
+    self.Sjmp("JNE", _LB_char_0_error) // JNE _char_0_error
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"'
+    self.Sjmp("JNE", _LB_char_1_error) // JNE _char_1_error
+    self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC
+    self.parse_string() // PARSE STRING
+    self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP)
 }

 func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) {
-    self.Emit("XORL", _AX, _AX) // XORL AX, AX
-    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+    self.Emit("XORL", _AX, _AX) // XORL AX, AX
+    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
 }

 func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) {
-    self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+    self.Emit("PXOR", _X0, _X0) // PXOR X0, X0
+    self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
 }

 func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) {
-    self.Emit("XORL" , _AX, _AX) // XORL AX, AX
-    self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
-    self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
-    self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVOU X0, 16(VP)
+    self.Emit("XORL", _AX, _AX) // XORL AX, AX
+    self.Emit("PXOR", _X0, _X0) // PXOR X0, X0
+    self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+    self.Emit("MOVQ", _AX, jit.Ptr(_VP, 16)) // MOVOU X0, 16(VP)
 }

 func (self *_Assembler) _asm_OP_deref(p *_Instr) {
-    self.vfollow(p.vt())
+    self.vfollow(p.vt())
 }

 func (self *_Assembler) _asm_OP_index(p *_Instr) {
-    self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX
-    self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP
+    self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX
+    self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP
 }

 func (self *_Assembler) _asm_OP_is_null(p *_Instr) {
-    self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
-    self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
-    self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n}
-    self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
-    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
-    self.Xjmp("JE" , p.vi()) // JE {p.vi()}
-    self.Link("_not_null_{n}") // _not_null_{n}:
+    self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+    self.Sjmp("JA", "_not_null_{n}") // JA _not_null_{n}
+    self.Emit("CMPL", jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+    self.Xjmp("JE", p.vi()) // JE {p.vi()}
+    self.Link("_not_null_{n}") // _not_null_{n}:
 }

 func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) {
-    self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX
-    self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
-    self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n}
-    self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
-    self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n}
-    self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"'
-    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
-    self.Xjmp("JE" , p.vi()) // JE {p.vi()}
-    self.Link("_not_null_quote_{n}") // _not_null_quote_{n}:
+    self.Emit("LEAQ", jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX
+    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+    self.Sjmp("JA", "_not_null_quote_{n}") // JA _not_null_quote_{n}
+    self.Emit("CMPL", jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+    self.Sjmp("JNE", "_not_null_quote_{n}") // JNE _not_null_quote_{n}
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"'
+    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+    self.Xjmp("JE", p.vi()) // JE {p.vi()}
+    self.Link("_not_null_quote_{n}") // _not_null_quote_{n}:
 }

 func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
-    self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
-    self.call_go(_F_makemap_small) // CALL_GO makemap_small
-    self.Emit("MOVQ" , jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX
-    self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
-    self.Link("_end_{n}") // _end_{n}:
-    self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
+    self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JNZ", "_end_{n}") // JNZ _end_{n}
+    self.call_go(_F_makemap_small) // CALL_GO makemap_small
+    self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX
+    self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+    self.Link("_end_{n}") // _end_{n}:
+    self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP
 }

 func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) {
-    self.parse_signed(int8Type, "", p.vi()) // PARSE int8
-    self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
-    self.match_char('"')
-    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
+    self.parse_signed(int8Type, "", p.vi()) // PARSE int8
+    self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+    self.match_char('"')
+    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
 }

 func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) {
-    self.parse_signed(int16Type, "", p.vi()) // PARSE int16
-    self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
-    self.match_char('"')
-    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
+    self.parse_signed(int16Type, "", p.vi()) // PARSE int16
+    self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+    self.match_char('"')
+    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
 }

 func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
-    self.parse_signed(int32Type, "", p.vi()) // PARSE int32
-    self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
-    self.match_char('"')
-    if vt := p.vt(); !mapfast(vt) {
-        self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
-    } else {
-        self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32
-    }
+    self.parse_signed(int32Type, "", p.vi()) // PARSE int32
+    self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+    self.match_char('"')
+    if vt := p.vt(); !mapfast(vt) {
+        self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
+    } else {
+        self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32
+    }
 }

 func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
-    self.parse_signed(int64Type, "", p.vi()) // PARSE int64
-    self.match_char('"')
-    if vt := p.vt(); !mapfast(vt) {
-        self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
-    } else {
-        self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
-        self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64
-    }
+    self.parse_signed(int64Type, "", p.vi()) // PARSE int64
+    self.match_char('"')
+    if vt := p.vt(); !mapfast(vt) {
+        self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
+    } else {
+        self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+        self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64
+    }
 }

 func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) {
-    self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
-    self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
-    self.match_char('"')
-    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
+    self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
+    self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+    self.match_char('"')
+    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
 }

 func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) {
-    self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
-    self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
-    self.match_char('"')
-    self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv
+    self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
+    self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+
self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv } func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) { - self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 - self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv - } else { - self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 - } + self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 + self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv + } else { + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 + } } func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) { - self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv - } else { - self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX - self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 - } + self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 + } } func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) { - self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER - self.range_single() // RANGE float32 - self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv + self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER + self.range_single() // RANGE float32 + self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv } func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) { - self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv + self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv } func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - if vt := p.vt(); !mapfast(vt) { - self.valloc(vt.Key(), _DI) - self.Emit("MOVOU", _VAR_sv, _X0) - self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) - self.mapassign_std(vt, jit.Ptr(_DI, 0)) - } else { - self.Emit("MOVQ", _VAR_sv_p, _DI) // MOVQ sv.p, DI - self.Emit("MOVQ", _VAR_sv_n, _SI) // MOVQ sv.n, SI - self.mapassign_str_fast(vt, _DI, _SI) // MAPASSIGN string, DI, SI - } + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + if vt := p.vt(); !mapfast(vt) { + self.valloc(vt.Key(), _DI) + self.Emit("MOVOU", _VAR_sv, _X0) + self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) + self.mapassign_std(vt, jit.Ptr(_DI, 0)) + } else { + self.Emit("MOVQ", _VAR_sv_p, _DI) // MOVQ sv.p, DI + self.Emit("MOVQ", _VAR_sv_n, _SI) // MOVQ sv.n, SI + 
self.mapassign_str_fast(vt, _DI, _SI) // MAPASSIGN string, DI, SI + } } func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false } func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n - self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true } func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) { - self.call_sf(_F_skip_array) // CALL_SF skip_array - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_array) // CALL_SF skip_array + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_array_clear(p *_Instr) { - self.mem_clear_rem(p.i64(), true) + self.mem_clear_rem(p.i64(), true) } func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) { - self.mem_clear_rem(p.i64(), false) + self.mem_clear_rem(p.i64(), false) } func (self *_Assembler) _asm_OP_slice_init(p *_Instr) { - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) - self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n} - self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX - self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) - self.Emit("MOVQ" , jit.Type(p.vt()), _DX) // MOVQ ${p.vt()}, DX - self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) - self.call_go(_F_makeslice) // CALL_GO makeslice - self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX - self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) - self.Link("_done_{n}") // _done_{n}: - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ", jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_done_{n}") // JNZ _done_{n} + self.Emit("MOVQ", jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX + self.Emit("MOVQ", _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + self.Emit("MOVQ", jit.Type(p.vt()), _DX) // MOVQ ${p.vt()}, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) + self.call_go(_F_makeslice) // CALL_GO makeslice + self.Emit("MOVQ", jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX + self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_done_{n}") // _done_{n}: + self.Emit("XORL", _AX, _AX) // XORL AX, AX + 
self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) } func (self *_Assembler) _asm_OP_check_empty(p *_Instr) { - rbracket := p.vb() - if rbracket == ']' { - self.check_eof(1) - self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']' - self.Sjmp("JNE" , "_not_empty_array_{n}") // JNE _not_empty_array_{n} - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP) - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP) - self.Xjmp("JMP" , p.vi()) // JMP {p.vi()} - self.Link("_not_empty_array_{n}") - } else { - panic("only implement check empty array here!") - } + rbracket := p.vb() + if rbracket == ']' { + self.check_eof(1) + self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']' + self.Sjmp("JNE", "_not_empty_array_{n}") // JNE _not_empty_array_{n} + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP) + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP) + self.Xjmp("JMP", p.vi()) // JMP {p.vi()} + self.Link("_not_empty_array_{n}") + } else { + panic("only implement check empty array here!") + } } func (self *_Assembler) _asm_OP_slice_append(p *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX - self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) - self.Sjmp("JB" , "_index_{n}") // JB _index_{n} - self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVOU", jit.Ptr(_VP, 0), _X0) // MOVOU (VP), X0 - self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) - self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) - self.Emit("SHLQ" , jit.Imm(1), _AX) // SHLQ $1, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) - self.call_go(_F_growslice) // CALL_GO growslice - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI - self.Emit("MOVQ" , jit.Ptr(_SP, 48), _AX) // MOVQ 48(SP), AX - self.Emit("MOVQ" , jit.Ptr(_SP, 56), _SI) // MOVQ 56(SP), SI - self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true)// MOVQ DI, (VP) - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) - self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) - - // because growslice not zero memory {oldcap, newlen} when append et not has ptrdata. - // but we should zero it, avoid decode it as random values. 
- if rt.UnpackType(p.vt()).PtrData == 0 { - self.Emit("SUBQ" , _AX, _SI) // MOVQ AX, SI - - self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) - self.Emit("MOVQ" , _DI, _VP) // MOVQ DI, VP - self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX - self.From("MULQ" , _CX) // MULQ CX - self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP - - self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX - self.From("MULQ" , _CX) // MULQ CX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - - self.Emit("MOVQ" , _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP) - self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap} - self.Sjmp("JMP", "_append_slice_end_{n}") // JMP _append_slice_end_{n} - } - - self.Link("_index_{n}") // _index_{n}: - self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP - self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX - self.From("MULQ" , _CX) // MULQ CX - self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP - self.Link("_append_slice_end_{n}") + self.Emit("MOVQ", jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX + self.Emit("CMPQ", _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) + self.Sjmp("JB", "_index_{n}") // JB _index_{n} + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", jit.Ptr(_VP, 0), _X0) // MOVOU (VP), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.Emit("MOVQ", jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) + self.Emit("SHLQ", jit.Imm(1), _AX) // SHLQ $1, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_go(_F_growslice) // CALL_GO growslice + self.Emit("MOVQ", jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI + self.Emit("MOVQ", jit.Ptr(_SP, 48), _AX) // MOVQ 48(SP), AX + self.Emit("MOVQ", jit.Ptr(_SP, 56), _SI) // MOVQ 56(SP), SI + self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true) // MOVQ DI, (VP) + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) + + // because growslice not zero memory {oldcap, newlen} when append et not has ptrdata. + // but we should zero it, avoid decode it as random values. 
+ if rt.UnpackType(p.vt()).PtrData == 0 { + self.Emit("SUBQ", _AX, _SI) // MOVQ AX, SI + + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ", _DI, _VP) // MOVQ DI, VP + self.Emit("MOVQ", jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.From("MULQ", _CX) // MULQ CX + self.Emit("ADDQ", _AX, _VP) // ADDQ AX, VP + + self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX + self.From("MULQ", _CX) // MULQ CX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + + self.Emit("MOVQ", _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP) + self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap} + self.Sjmp("JMP", "_append_slice_end_{n}") // JMP _append_slice_end_{n} + } + + self.Link("_index_{n}") // _index_{n}: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ", jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP + self.Emit("MOVQ", jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.From("MULQ", _CX) // MULQ CX + self.Emit("ADDQ", _AX, _VP) // ADDQ AX, VP + self.Link("_append_slice_end_{n}") } func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) { - self.call_sf(_F_skip_object) // CALL_SF skip_object - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_object) // CALL_SF skip_object + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_object_next(_ *_Instr) { - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_struct_field(p *_Instr) { - assert_eq(caching.FieldEntrySize, 32, "invalid field entry size") - self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX - self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr - self.parse_string() // PARSE STRING - self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n - self.Emit("LEAQ" , _VAR_sv, _AX) // LEAQ sv, AX - self.Emit("XORL" , _CX, _CX) // XORL CX, CX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.call_go(_F_strhash) // CALL_GO strhash - self.Emit("MOVQ" , jit.Ptr(_SP, 16), _AX) // MOVQ 16(SP), AX - self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 - self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX - self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI - self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX - self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX - self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} - self.Link("_loop_{n}") // _loop_{n}: - self.Emit("XORL" , _DX, _DX) // XORL DX, DX - self.From("DIVQ" , _CX) // DIVQ CX - self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX - self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX - self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8 - self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8 - self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} - self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9 - self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) 
-    self.Emit("CMPQ" , _DX, _VAR_sv_n) // CMPQ DX, sv.n
-    self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n}
-    self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8
-    self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX
-    self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX
-    self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI
-    self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8
-    self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9
-    self.Emit("MOVQ" , _VAR_sv_p, _AX) // MOVQ _VAR_sv_p, AX
-    self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
-    self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
-    self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 16)) // MOVQ DX, 16(SP)
-    self.call_go(_F_memequal) // CALL_GO memequal
-    self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX
-    self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX
-    self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI
-    self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9
-    self.Emit("MOVB" , jit.Ptr(_SP, 24), _DX) // MOVB 24(SP), DX
-    self.Emit("TESTB", _DX, _DX) // TESTB DX, DX
-    self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n}
-    self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8
-    self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr
-    self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
-    self.Link("_try_lowercase_{n}") // _try_lowercase_{n}:
-    self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX
-    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
-    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
-    self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive
-    self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX
-    self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr
-    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n}
-    self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv
-    self.Sjmp("JC" , _LB_field_error) // JC _field_error
-    self.Link("_end_{n}") // _end_{n}:
+    assert_eq(caching.FieldEntrySize, 32, "invalid field entry size")
+    self.Emit("MOVQ", jit.Imm(-1), _AX) // MOVQ $-1, AX
+    self.Emit("MOVQ", _AX, _VAR_sr) // MOVQ AX, sr
+    self.parse_string() // PARSE STRING
+    self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n
+    self.Emit("LEAQ", _VAR_sv, _AX) // LEAQ sv, AX
+    self.Emit("XORL", _CX, _CX) // XORL CX, CX
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+    self.call_go(_F_strhash) // CALL_GO strhash
+    self.Emit("MOVQ", jit.Ptr(_SP, 16), _AX) // MOVQ 16(SP), AX
+    self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9
+    self.Emit("MOVQ", jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX
+    self.Emit("MOVQ", jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI
+    self.Emit("MOVQ", jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX
+    self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+    self.Sjmp("JZ", "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+    self.Link("_loop_{n}") // _loop_{n}:
+    self.Emit("XORL", _DX, _DX) // XORL DX, DX
+    self.From("DIVQ", _CX) // DIVQ CX
+    self.Emit("LEAQ", jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX
+    self.Emit("SHLQ", jit.Imm(5), _DX) // SHLQ $5, DX
+    self.Emit("LEAQ", jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI
+    self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8
+    self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8
+    self.Sjmp("JZ", "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+    self.Emit("CMPQ", _R8, _R9) // CMPQ R8, R9
+    self.Sjmp("JNE", "_loop_{n}") // JNE _loop_{n}
+    self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Name+8), _DX) // MOVQ FieldEntry.Name+8(DI), DX
+    self.Emit("CMPQ", _DX, _VAR_sv_n) // CMPQ DX, sv.n
+    self.Sjmp("JNE", "_loop_{n}") // JNE _loop_{n}
+    self.Emit("MOVQ", jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8
+    self.Emit("MOVQ", _AX, _VAR_ss_AX) // MOVQ AX, ss.AX
+    self.Emit("MOVQ", _CX, _VAR_ss_CX) // MOVQ CX, ss.CX
+    self.Emit("MOVQ", _SI, _VAR_ss_SI) // MOVQ SI, ss.SI
+    self.Emit("MOVQ", _R8, _VAR_ss_R8) // MOVQ R8, ss.R8
+    self.Emit("MOVQ", _R9, _VAR_ss_R9) // MOVQ R9, ss.R9
+    self.Emit("MOVQ", _VAR_sv_p, _AX) // MOVQ _VAR_sv_p, AX
+    self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+    self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+    self.Emit("MOVQ", _DX, jit.Ptr(_SP, 16)) // MOVQ DX, 16(SP)
+    self.call_go(_F_memequal) // CALL_GO memequal
+    self.Emit("MOVQ", _VAR_ss_AX, _AX) // MOVQ ss.AX, AX
+    self.Emit("MOVQ", _VAR_ss_CX, _CX) // MOVQ ss.CX, CX
+    self.Emit("MOVQ", _VAR_ss_SI, _SI) // MOVQ ss.SI, SI
+    self.Emit("MOVQ", _VAR_ss_R9, _R9) // MOVQ ss.R9, R9
+    self.Emit("MOVB", jit.Ptr(_SP, 24), _DX) // MOVB 24(SP), DX
+    self.Emit("TESTB", _DX, _DX) // TESTB DX, DX
+    self.Sjmp("JZ", "_loop_{n}") // JZ _loop_{n}
+    self.Emit("MOVQ", _VAR_ss_R8, _R8) // MOVQ ss.R8, R8
+    self.Emit("MOVQ", _R8, _VAR_sr) // MOVQ R8, sr
+    self.Sjmp("JMP", "_end_{n}") // JMP _end_{n}
+    self.Link("_try_lowercase_{n}") // _try_lowercase_{n}:
+    self.Emit("MOVQ", jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX
+    self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+    self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+    self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive
+    self.Emit("MOVQ", jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX
+    self.Emit("MOVQ", _AX, _VAR_sr) // MOVQ AX, _VAR_sr
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JNS", "_end_{n}") // JNS _end_{n}
+    self.Emit("BTQ", jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv
+    self.Sjmp("JC", _LB_field_error) // JC _field_error
+    self.Link("_end_{n}") // _end_{n}:
 }
 
 func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) {
-    self.unmarshal_json(p.vt(), true)
+    self.unmarshal_json(p.vt(), true)
 }
 
 func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) {
-    self.unmarshal_json(p.vt(), false)
+    self.unmarshal_json(p.vt(), false)
 }
 
 func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) {
-    self.unmarshal_text(p.vt(), true)
+    self.unmarshal_text(p.vt(), true)
 }
 
 func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) {
-    self.unmarshal_text(p.vt(), false)
+    self.unmarshal_text(p.vt(), false)
 }
 
 func (self *_Assembler) _asm_OP_lspace(_ *_Instr) {
-    self.lspace("_{n}")
+    self.lspace("_{n}")
 }
 
 func (self *_Assembler) lspace(subfix string) {
-    var label = "_lspace" + subfix
-
-    self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
-    self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
-    self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
-    self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
-    self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
-    self.Sjmp("JA" , label) // JA _nospace_{n}
-    self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
-    self.Sjmp("JNC" , label) // JNC _nospace_{n}
-
-    /* test up to 4 characters */
-    for i := 0; i < 3; i++ {
-        self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
-        self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
-        self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
-        self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
-        self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
-        self.Sjmp("JA" , label) // JA _nospace_{n}
-        self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
-        self.Sjmp("JNC" , label) // JNC _nospace_{n}
-    }
-
-    /* handle over to the native function */
-    self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI
-    self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI
-    self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX
-    self.call(_F_lspace) // CALL lspace
-    self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX
-    self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v
-    self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
-    self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
-    self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC
-    self.Link(label) // _nospace_{n}:
+    var label = "_lspace" + subfix
+
+    self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL
+    self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error
+    self.Emit("MOVQ", jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
+    self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+    self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' '
+    self.Sjmp("JA", label) // JA _nospace_{n}
+    self.Emit("BTQ", _AX, _DX) // BTQ AX, DX
+    self.Sjmp("JNC", label) // JNC _nospace_{n}
+
+    /* test up to 4 characters */
+    for i := 0; i < 3; i++ {
+        self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+        self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL
+        self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error
+        self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+        self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' '
+        self.Sjmp("JA", label) // JA _nospace_{n}
+        self.Emit("BTQ", _AX, _DX) // BTQ AX, DX
+        self.Sjmp("JNC", label) // JNC _nospace_{n}
+    }
+
+    /* handle over to the native function */
+    self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI
+    self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI
+    self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX
+    self.call(_F_lspace) // CALL lspace
+    self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+    self.Sjmp("JS", _LB_parsing_error_v) // JS _parsing_error_v
+    self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+    self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error
+    self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+    self.Link(label) // _nospace_{n}:
 }
 
 func (self *_Assembler) _asm_OP_match_char(p *_Instr) {
-    self.match_char(p.vb())
+    self.match_char(p.vb())
 }
 
 func (self *_Assembler) match_char(char byte) {
-    self.check_eof(1)
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()}
-    self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
-    self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+    self.check_eof(1)
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()}
+    self.Sjmp("JNE", _LB_char_0_error) // JNE _char_0_error
+    self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
 }
 
 func (self *_Assembler) _asm_OP_check_char(p *_Instr) {
-    self.check_eof(1)
-    self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
-    self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
-    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
-    self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+    self.check_eof(1)
+    self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+    self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+    self.Xjmp("JE", p.vi()) // JE {p.vi()}
 }
 
 func (self *_Assembler) _asm_OP_check_char_0(p *_Instr) {
-    self.check_eof(1)
-    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
-    self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+    self.check_eof(1)
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+    self.Xjmp("JE", p.vi()) // JE {p.vi()}
 }
 
 func (self *_Assembler) _asm_OP_add(p *_Instr) {
-    self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC
+    self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC
 }
 
 func (self *_Assembler) _asm_OP_load(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP
 }
 
 func (self *_Assembler) _asm_OP_save(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
-    self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes}
-    self.Sjmp("JAE" , _LB_stack_error) // JA _stack_error
-    self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX)
-    self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX
-    self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST)
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
+    self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes}
+    self.Sjmp("JAE", _LB_stack_error) // JA _stack_error
+    self.WriteRecNotAX(0, _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX)
+    self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX
+    self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST)
 }
 
 func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
-    self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
-    self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
-    self.Emit("XORL", _ET, _ET) // XORL ET, ET
-    self.Emit("MOVQ", _ET, jit.Sib(_ST, _AX, 1, 8)) // MOVQ ET, 8(ST)(AX)
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+    self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+    self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+    self.Emit("XORL", _ET, _ET) // XORL ET, ET
+    self.Emit("MOVQ", _ET, jit.Sib(_ST, _AX, 1, 8)) // MOVQ ET, 8(ST)(AX)
 }
 
 func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
-    self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
-    self.Emit("SUBQ" , jit.Imm(16), _AX) // SUBQ $16, AX
-    self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
-    self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
-    self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
-    self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+    self.Emit("SUBQ", jit.Imm(16), _AX) // SUBQ $16, AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+    self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+    self.Emit("PXOR", _X0, _X0) // PXOR X0, X0
+    self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
 }
 
 func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
-    self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
-    self.decode_dynamic(_AX, _VP) // DECODE AX, VP
+    self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
+    self.decode_dynamic(_AX, _VP) // DECODE AX, VP
 }
 
 func (self *_Assembler) _asm_OP_goto(p *_Instr) {
-    self.Xjmp("JMP", p.vi())
+    self.Xjmp("JMP", p.vi())
 }
 
 func (self *_Assembler) _asm_OP_switch(p *_Instr) {
-    self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX
-    self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())}
-    self.Sjmp("JAE" , "_default_{n}") // JAE _default_{n}
+    self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX
+    self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())}
+    self.Sjmp("JAE", "_default_{n}") // JAE _default_{n}
 
-    /* jump table selector */
-    self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
-    self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n}
-    self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
-    self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
-    self.Rjmp("JMP" , _AX) // JMP AX
-    self.Link("_switch_table_{n}") // _switch_table_{n}:
+    /* jump table selector */
+    self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+    self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n}
+    self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
+    self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX
+    self.Rjmp("JMP", _AX) // JMP AX
+    self.Link("_switch_table_{n}") // _switch_table_{n}:
 
-    /* generate the jump table */
-    for i, v := range p.vs() {
-        self.Xref(v, int64(-i) * 4)
-    }
+    /* generate the jump table */
+    for i, v := range p.vs() {
+        self.Xref(v, int64(-i)*4)
+    }
 
-    /* default case */
-    self.Link("_default_{n}")
-    self.NOP()
+    /* default case */
+    self.Link("_default_{n}")
+    self.NOP()
 }
 
 func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) {
-    self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16)) // MOVQ $(p2.op()), 16(SP)
-    self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP)
-    self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP)
-    self.call_go(_F_println)
+    self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16)) // MOVQ $(p2.op()), 16(SP)
+    self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP)
+    self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP)
+    self.call_go(_F_println)
 }
 
 var _runtime_writeBarrier uintptr = rt.GcwbAddr()
@@ -1958,56 +1956,56 @@ var _runtime_writeBarrier uintptr = rt.GcwbAddr()
 func gcWriteBarrierAX()
 
 var (
-    _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier))
+    _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier))
 
-    _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
+    _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
 )
 
 func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
-    self.Emit("MOVQ", _V_writeBarrier, _R10)
-    self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
-    self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    if saveDI {
-        self.save(_DI)
-    }
-    self.Emit("LEAQ", rec, _DI)
-    self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
-    self.Rjmp("CALL", _R10)
-    if saveDI {
-        self.load(_DI)
-    }
-    self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    self.Emit("MOVQ", _AX, rec)
-    self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+    self.Emit("MOVQ", _V_writeBarrier, _R10)
+    self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+    self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}")
+    if saveDI {
+        self.save(_DI)
+    }
+    self.Emit("LEAQ", rec, _DI)
+    self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+    self.Rjmp("CALL", _R10)
+    if saveDI {
+        self.load(_DI)
+    }
+    self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}")
+    self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+    self.Emit("MOVQ", _AX, rec)
+    self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
 }
 
 func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) {
-    if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
-        panic("rec contains AX!")
-    }
-    self.Emit("MOVQ", _V_writeBarrier, _R10)
-    self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
-    self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    if saveAX {
-        self.Emit("XCHGQ", ptr, _AX)
-    } else {
-        self.Emit("MOVQ", ptr, _AX)
-    }
-    if saveDI {
-        self.save(_DI)
-    }
-    self.Emit("LEAQ", rec, _DI)
-    self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
-    self.Rjmp("CALL", _R10)
-    if saveDI {
-        self.load(_DI)
-    }
-    if saveAX {
-        self.Emit("XCHGQ", ptr, _AX)
-    }
-    self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
-    self.Emit("MOVQ", ptr, rec)
-    self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
-}
\ No newline at end of file
+    if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+        panic("rec contains AX!")
+    }
+    self.Emit("MOVQ", _V_writeBarrier, _R10)
+    self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+    self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}")
+    if saveAX {
+        self.Emit("XCHGQ", ptr, _AX)
+    } else {
+        self.Emit("MOVQ", ptr, _AX)
+    }
+    if saveDI {
+        self.save(_DI)
+    }
+    self.Emit("LEAQ", rec, _DI)
+    self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+    self.Rjmp("CALL", _R10)
+    if saveDI {
+        self.load(_DI)
+    }
+    if saveAX {
+        self.Emit("XCHGQ", ptr, _AX)
+    }
+    self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}")
+    self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+    self.Emit("MOVQ", ptr, rec)
+    self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
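Aside: every hunk in these two vendored sonic assembler files is formatting-only — backquoted import paths become standard double-quoted string literals, spacing such as `"JNE" ,` collapses to `"JNE",`, and hand-aligned colons in table literals are normalized — so the machine code the assembler emits is unchanged. A minimal, hypothetical Go sketch of that style normalization (the names below are invented for illustration and do not come from this diff):

package main

import "fmt"

// The old vendored style wrote table entries as `0 : "nop",` with a space
// before the colon; the reformatted style below is what the new files use.
var opNames = [4]string{
	0: "nop",
	1: "load",
	2: "save",
	3: "drop",
}

func main() {
	fmt.Println(opNames[2]) // prints "save"; behavior is identical either way
}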
diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/internal/decoder/assembler_amd64_go117.go
index 27413739d..c0d3bfcd2 100644
--- a/vendor/github.com/bytedance/sonic/internal/decoder/assembler_amd64_go117.go
+++ b/vendor/github.com/bytedance/sonic/internal/decoder/assembler_amd64_go117.go
@@ -20,20 +20,20 @@ package decoder
 
 import (
-    `encoding/json`
-    `fmt`
-    `math`
-    `reflect`
-    `strconv`
-    `unsafe`
-
-    `github.com/bytedance/sonic/internal/caching`
-    `github.com/bytedance/sonic/internal/jit`
-    `github.com/bytedance/sonic/internal/native`
-    `github.com/bytedance/sonic/internal/native/types`
-    `github.com/bytedance/sonic/internal/rt`
-    `github.com/twitchyliquid64/golang-asm/obj`
-    `github.com/twitchyliquid64/golang-asm/obj/x86`
+    "encoding/json"
+    "fmt"
+    "math"
+    "reflect"
+    "strconv"
+    "unsafe"
+
+    "github.com/bytedance/sonic/internal/caching"
+    "github.com/bytedance/sonic/internal/jit"
+    "github.com/bytedance/sonic/internal/native"
+    "github.com/bytedance/sonic/internal/native/types"
+    "github.com/bytedance/sonic/internal/rt"
+    "github.com/twitchyliquid64/golang-asm/obj"
+    "github.com/twitchyliquid64/golang-asm/obj/x86"
 )
 
 /** Register Allocations
@@ -68,1868 +68,1864 @@ import (
 */
 
 const (
-    _FP_args = 72 // 72 bytes to pass and spill register arguements
-    _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions
-    _FP_saves = 48 // 48 bytes for saving the registers before CALL instructions
-    _FP_locals = 144 // 144 bytes for local variables
+    _FP_args = 72 // 72 bytes to pass and spill register arguements
+    _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions
+    _FP_saves = 48 // 48 bytes for saving the registers before CALL instructions
+    _FP_locals = 144 // 144 bytes for local variables
 )
 
 const (
-    _FP_offs = _FP_fargs + _FP_saves + _FP_locals
-    _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
-    _FP_base = _FP_size + 8 // 8 bytes for the return address
+    _FP_offs = _FP_fargs + _FP_saves + _FP_locals
+    _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
+    _FP_base = _FP_size + 8 // 8 bytes for the return address
 )
 
 const (
-    _IM_null = 0x6c6c756e // 'null'
-    _IM_true = 0x65757274 // 'true'
-    _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f')
+    _IM_null = 0x6c6c756e // 'null'
+    _IM_true = 0x65757274 // 'true'
+    _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f')
 )
 
 const (
-    _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')
+    _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')
 )
 
 const (
-    _MODE_JSON = 1 << 3 // base64 mode
+    _MODE_JSON = 1 << 3 // base64 mode
 )
 
 const (
-    _LB_error = "_error"
-    _LB_im_error = "_im_error"
-    _LB_eof_error = "_eof_error"
-    _LB_type_error = "_type_error"
-    _LB_field_error = "_field_error"
-    _LB_range_error = "_range_error"
-    _LB_stack_error = "_stack_error"
-    _LB_base64_error = "_base64_error"
-    _LB_unquote_error = "_unquote_error"
-    _LB_parsing_error = "_parsing_error"
-    _LB_parsing_error_v = "_parsing_error_v"
-    _LB_mismatch_error = "_mismatch_error"
+    _LB_error = "_error"
+    _LB_im_error = "_im_error"
+    _LB_eof_error = "_eof_error"
+    _LB_type_error = "_type_error"
+    _LB_field_error = "_field_error"
+    _LB_range_error = "_range_error"
+    _LB_stack_error = "_stack_error"
+    _LB_base64_error = "_base64_error"
+    _LB_unquote_error = "_unquote_error"
+    _LB_parsing_error = "_parsing_error"
+    _LB_parsing_error_v = "_parsing_error_v"
+    _LB_mismatch_error = "_mismatch_error"
 )
 
 const (
-    _LB_char_0_error = "_char_0_error"
-    _LB_char_1_error = "_char_1_error"
-    _LB_char_2_error = "_char_2_error"
-    _LB_char_3_error = "_char_3_error"
-    _LB_char_4_error = "_char_4_error"
-    _LB_char_m2_error = "_char_m2_error"
-    _LB_char_m3_error = "_char_m3_error"
+    _LB_char_0_error = "_char_0_error"
+    _LB_char_1_error = "_char_1_error"
+    _LB_char_2_error = "_char_2_error"
+    _LB_char_3_error = "_char_3_error"
+    _LB_char_4_error = "_char_4_error"
+    _LB_char_m2_error = "_char_m2_error"
+    _LB_char_m3_error = "_char_m3_error"
 )
 
 const (
-    _LB_skip_one = "_skip_one"
-    _LB_skip_key_value = "_skip_key_value"
+    _LB_skip_one = "_skip_one"
+    _LB_skip_key_value = "_skip_key_value"
 )
 
 var (
-    _AX = jit.Reg("AX")
-    _BX = jit.Reg("BX")
-    _CX = jit.Reg("CX")
-    _DX = jit.Reg("DX")
-    _DI = jit.Reg("DI")
-    _SI = jit.Reg("SI")
-    _BP = jit.Reg("BP")
-    _SP = jit.Reg("SP")
-    _R8 = jit.Reg("R8")
-    _R9 = jit.Reg("R9")
-    _X0 = jit.Reg("X0")
-    _X1 = jit.Reg("X1")
+    _AX = jit.Reg("AX")
+    _BX = jit.Reg("BX")
+    _CX = jit.Reg("CX")
+    _DX = jit.Reg("DX")
+    _DI = jit.Reg("DI")
+    _SI = jit.Reg("SI")
+    _BP = jit.Reg("BP")
+    _SP = jit.Reg("SP")
+    _R8 = jit.Reg("R8")
+    _R9 = jit.Reg("R9")
+    _X0 = jit.Reg("X0")
+    _X1 = jit.Reg("X1")
 )
 
 var (
-    _IP = jit.Reg("R10") // saved on BP when callc
-    _IC = jit.Reg("R11") // saved on BX when call_c
-    _IL = jit.Reg("R12")
-    _ST = jit.Reg("R13")
-    _VP = jit.Reg("R15")
+    _IP = jit.Reg("R10") // saved on BP when callc
+    _IC = jit.Reg("R11") // saved on BX when call_c
+    _IL = jit.Reg("R12")
+    _ST = jit.Reg("R13")
+    _VP = jit.Reg("R15")
 )
 
 var (
-    _DF = jit.Reg("AX") // reuse AX in generic decoder for flags
-    _ET = jit.Reg("AX")
-    _EP = jit.Reg("BX")
+    _DF = jit.Reg("AX") // reuse AX in generic decoder for flags
+    _ET = jit.Reg("AX")
+    _EP = jit.Reg("BX")
 )
-
-
 var (
-    _ARG_s = _ARG_sp
-    _ARG_sp = jit.Ptr(_SP, _FP_base + 0)
-    _ARG_sl = jit.Ptr(_SP, _FP_base + 8)
-    _ARG_ic = jit.Ptr(_SP, _FP_base + 16)
-    _ARG_vp = jit.Ptr(_SP, _FP_base + 24)
-    _ARG_sb = jit.Ptr(_SP, _FP_base + 32)
-    _ARG_fv = jit.Ptr(_SP, _FP_base + 40)
+    _ARG_s = _ARG_sp
+    _ARG_sp = jit.Ptr(_SP, _FP_base+0)
+    _ARG_sl = jit.Ptr(_SP, _FP_base+8)
+    _ARG_ic = jit.Ptr(_SP, _FP_base+16)
+    _ARG_vp = jit.Ptr(_SP, _FP_base+24)
+    _ARG_sb = jit.Ptr(_SP, _FP_base+32)
+    _ARG_fv = jit.Ptr(_SP, _FP_base+40)
 )
 
 var (
-    _ARG_sv = _ARG_sv_p
-    _ARG_sv_p = jit.Ptr(_SP, _FP_base + 48)
-    _ARG_sv_n = jit.Ptr(_SP, _FP_base + 56)
-    _ARG_vk = jit.Ptr(_SP, _FP_base + 64)
+    _ARG_sv = _ARG_sv_p
+    _ARG_sv_p = jit.Ptr(_SP, _FP_base+48)
+    _ARG_sv_n = jit.Ptr(_SP, _FP_base+56)
+    _ARG_vk = jit.Ptr(_SP, _FP_base+64)
 )
 
 var (
-    _VAR_st = _VAR_st_Vt
-    _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves)
+    _VAR_st = _VAR_st_Vt
+    _VAR_sr = jit.Ptr(_SP, _FP_fargs+_FP_saves)
 )
 
 var (
-    _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0)
-    _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8)
-    _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16)
-    _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24)
-    _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32)
-    _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40)
+    _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs+_FP_saves+0)
+    _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs+_FP_saves+8)
+    _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs+_FP_saves+16)
+    _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs+_FP_saves+24)
+    _VAR_st_Db = jit.Ptr(_SP, _FP_fargs+_FP_saves+32)
+    _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs+_FP_saves+40)
 )
 
 var (
-    _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48)
-    _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56)
-    _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64)
-    _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72)
-    _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80)
+    _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs+_FP_saves+48)
+    _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs+_FP_saves+56)
+    _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs+_FP_saves+64)
+    _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs+_FP_saves+72)
+    _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs+_FP_saves+80)
 )
 
 var (
-    _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88)
-    _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96)
-    _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104)
+    _VAR_bs_p = jit.Ptr(_SP, _FP_fargs+_FP_saves+88)
+    _VAR_bs_n = jit.Ptr(_SP, _FP_fargs+_FP_saves+96)
+    _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs+_FP_saves+104)
 )
 
-var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112)
+var _VAR_fl = jit.Ptr(_SP, _FP_fargs+_FP_saves+112)
 
 var (
-    _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save dismatched type
-    _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save skip return pc
-    _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save dismatched position
+    _VAR_et = jit.Ptr(_SP, _FP_fargs+_FP_saves+120) // save dismatched type
+    _VAR_pc = jit.Ptr(_SP, _FP_fargs+_FP_saves+128) // save skip return pc
+    _VAR_ic = jit.Ptr(_SP, _FP_fargs+_FP_saves+136) // save dismatched position
 )
 
 type _Assembler struct {
-    jit.BaseAssembler
-    p _Program
-    name string
+    jit.BaseAssembler
+    p _Program
+    name string
 }
 
 func newAssembler(p _Program) *_Assembler {
-    return new(_Assembler).Init(p)
+    return new(_Assembler).Init(p)
 }
 
 /** Assembler Interface **/
 
 func (self *_Assembler) Load() _Decoder {
-    return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
+    return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
 }
 
 func (self *_Assembler) Init(p _Program) *_Assembler {
-    self.p = p
-    self.BaseAssembler.Init(self.compile)
-    return self
+    self.p = p
+    self.BaseAssembler.Init(self.compile)
+    return self
 }
 
 func (self *_Assembler) compile() {
-    self.prologue()
-    self.instrs()
-    self.epilogue()
-    self.copy_string()
-    self.escape_string()
-    self.escape_string_twice()
-    self.skip_one()
-    self.skip_key_value()
-    self.type_error()
-    self.mismatch_error()
-    self.field_error()
-    self.range_error()
-    self.stack_error()
-    self.base64_error()
-    self.parsing_error()
+    self.prologue()
+    self.instrs()
+    self.epilogue()
+    self.copy_string()
+    self.escape_string()
+    self.escape_string_twice()
+    self.skip_one()
+    self.skip_key_value()
+    self.type_error()
+    self.mismatch_error()
+    self.field_error()
+    self.range_error()
+    self.stack_error()
+    self.base64_error()
+    self.parsing_error()
 }
 
 /** Assembler Stages **/
 
-var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
-    _OP_any : (*_Assembler)._asm_OP_any,
-    _OP_dyn : (*_Assembler)._asm_OP_dyn,
-    _OP_str : (*_Assembler)._asm_OP_str,
-    _OP_bin : (*_Assembler)._asm_OP_bin,
-    _OP_bool : (*_Assembler)._asm_OP_bool,
-    _OP_num : (*_Assembler)._asm_OP_num,
-    _OP_i8 : (*_Assembler)._asm_OP_i8,
-    _OP_i16 : (*_Assembler)._asm_OP_i16,
-    _OP_i32 : (*_Assembler)._asm_OP_i32,
-    _OP_i64 : (*_Assembler)._asm_OP_i64,
-    _OP_u8 : (*_Assembler)._asm_OP_u8,
-    _OP_u16 : (*_Assembler)._asm_OP_u16,
-    _OP_u32 : (*_Assembler)._asm_OP_u32,
-    _OP_u64 : (*_Assembler)._asm_OP_u64,
-    _OP_f32 : (*_Assembler)._asm_OP_f32,
-    _OP_f64 : (*_Assembler)._asm_OP_f64,
-    _OP_unquote : (*_Assembler)._asm_OP_unquote,
-    _OP_nil_1 : (*_Assembler)._asm_OP_nil_1,
-    _OP_nil_2 : (*_Assembler)._asm_OP_nil_2,
-    _OP_nil_3 : (*_Assembler)._asm_OP_nil_3,
-    _OP_deref : (*_Assembler)._asm_OP_deref,
-    _OP_index : (*_Assembler)._asm_OP_index,
-    _OP_is_null : (*_Assembler)._asm_OP_is_null,
-    _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote,
-    _OP_map_init : (*_Assembler)._asm_OP_map_init,
-    _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8,
-    _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16,
-    _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32,
-    _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64,
-    _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8,
-    _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16,
-    _OP_map_key_u32 : (*_Assembler)._asm_OP_map_key_u32,
-    _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64,
-    _OP_map_key_f32 : (*_Assembler)._asm_OP_map_key_f32,
-    _OP_map_key_f64 : (*_Assembler)._asm_OP_map_key_f64,
-    _OP_map_key_str : (*_Assembler)._asm_OP_map_key_str,
-    _OP_map_key_utext : (*_Assembler)._asm_OP_map_key_utext,
-    _OP_map_key_utext_p : (*_Assembler)._asm_OP_map_key_utext_p,
-    _OP_array_skip : (*_Assembler)._asm_OP_array_skip,
-    _OP_array_clear : (*_Assembler)._asm_OP_array_clear,
-    _OP_array_clear_p : (*_Assembler)._asm_OP_array_clear_p,
-    _OP_slice_init : (*_Assembler)._asm_OP_slice_init,
-    _OP_slice_append : (*_Assembler)._asm_OP_slice_append,
-    _OP_object_skip : (*_Assembler)._asm_OP_object_skip,
-    _OP_object_next : (*_Assembler)._asm_OP_object_next,
-    _OP_struct_field : (*_Assembler)._asm_OP_struct_field,
-    _OP_unmarshal : (*_Assembler)._asm_OP_unmarshal,
-    _OP_unmarshal_p : (*_Assembler)._asm_OP_unmarshal_p,
-    _OP_unmarshal_text : (*_Assembler)._asm_OP_unmarshal_text,
-    _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p,
-    _OP_lspace : (*_Assembler)._asm_OP_lspace,
-    _OP_match_char : (*_Assembler)._asm_OP_match_char,
-    _OP_check_char : (*_Assembler)._asm_OP_check_char,
-    _OP_load : (*_Assembler)._asm_OP_load,
-    _OP_save : (*_Assembler)._asm_OP_save,
-    _OP_drop : (*_Assembler)._asm_OP_drop,
-    _OP_drop_2 : (*_Assembler)._asm_OP_drop_2,
-    _OP_recurse : (*_Assembler)._asm_OP_recurse,
-    _OP_goto : (*_Assembler)._asm_OP_goto,
-    _OP_switch : (*_Assembler)._asm_OP_switch,
-    _OP_check_char_0 : (*_Assembler)._asm_OP_check_char_0,
-    _OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err,
-    _OP_go_skip : (*_Assembler)._asm_OP_go_skip,
-    _OP_add : (*_Assembler)._asm_OP_add,
-    _OP_check_empty : (*_Assembler)._asm_OP_check_empty,
-    _OP_debug : (*_Assembler)._asm_OP_debug,
+var _OpFuncTab = [256]func(*_Assembler, *_Instr){
+    _OP_any: (*_Assembler)._asm_OP_any,
+    _OP_dyn: (*_Assembler)._asm_OP_dyn,
+    _OP_str: (*_Assembler)._asm_OP_str,
+    _OP_bin: (*_Assembler)._asm_OP_bin,
+    _OP_bool: (*_Assembler)._asm_OP_bool,
+    _OP_num: (*_Assembler)._asm_OP_num,
+    _OP_i8: (*_Assembler)._asm_OP_i8,
+    _OP_i16: (*_Assembler)._asm_OP_i16,
+    _OP_i32: (*_Assembler)._asm_OP_i32,
+    _OP_i64: (*_Assembler)._asm_OP_i64,
+    _OP_u8: (*_Assembler)._asm_OP_u8,
+    _OP_u16: (*_Assembler)._asm_OP_u16,
+    _OP_u32: (*_Assembler)._asm_OP_u32,
+    _OP_u64: (*_Assembler)._asm_OP_u64,
+    _OP_f32: (*_Assembler)._asm_OP_f32,
+    _OP_f64: (*_Assembler)._asm_OP_f64,
+    _OP_unquote: (*_Assembler)._asm_OP_unquote,
+    _OP_nil_1: (*_Assembler)._asm_OP_nil_1,
+    _OP_nil_2: (*_Assembler)._asm_OP_nil_2,
+    _OP_nil_3: (*_Assembler)._asm_OP_nil_3,
+    _OP_deref: (*_Assembler)._asm_OP_deref,
+    _OP_index: (*_Assembler)._asm_OP_index,
+    _OP_is_null: (*_Assembler)._asm_OP_is_null,
+    _OP_is_null_quote: (*_Assembler)._asm_OP_is_null_quote,
+    _OP_map_init: (*_Assembler)._asm_OP_map_init,
+    _OP_map_key_i8: (*_Assembler)._asm_OP_map_key_i8,
+    _OP_map_key_i16: (*_Assembler)._asm_OP_map_key_i16,
+    _OP_map_key_i32: (*_Assembler)._asm_OP_map_key_i32,
+    _OP_map_key_i64: (*_Assembler)._asm_OP_map_key_i64,
+    _OP_map_key_u8: (*_Assembler)._asm_OP_map_key_u8,
+    _OP_map_key_u16: (*_Assembler)._asm_OP_map_key_u16,
+    _OP_map_key_u32: (*_Assembler)._asm_OP_map_key_u32,
+    _OP_map_key_u64: (*_Assembler)._asm_OP_map_key_u64,
+    _OP_map_key_f32: (*_Assembler)._asm_OP_map_key_f32,
+    _OP_map_key_f64: (*_Assembler)._asm_OP_map_key_f64,
+    _OP_map_key_str: (*_Assembler)._asm_OP_map_key_str,
+    _OP_map_key_utext: (*_Assembler)._asm_OP_map_key_utext,
+    _OP_map_key_utext_p: (*_Assembler)._asm_OP_map_key_utext_p,
+    _OP_array_skip: (*_Assembler)._asm_OP_array_skip,
+    _OP_array_clear: (*_Assembler)._asm_OP_array_clear,
+    _OP_array_clear_p: (*_Assembler)._asm_OP_array_clear_p,
+    _OP_slice_init: (*_Assembler)._asm_OP_slice_init,
+    _OP_slice_append: (*_Assembler)._asm_OP_slice_append,
+    _OP_object_skip: (*_Assembler)._asm_OP_object_skip,
+    _OP_object_next: (*_Assembler)._asm_OP_object_next,
+    _OP_struct_field: (*_Assembler)._asm_OP_struct_field,
+    _OP_unmarshal: (*_Assembler)._asm_OP_unmarshal,
+    _OP_unmarshal_p: (*_Assembler)._asm_OP_unmarshal_p,
+    _OP_unmarshal_text: (*_Assembler)._asm_OP_unmarshal_text,
+    _OP_unmarshal_text_p: (*_Assembler)._asm_OP_unmarshal_text_p,
+    _OP_lspace: (*_Assembler)._asm_OP_lspace,
+    _OP_match_char: (*_Assembler)._asm_OP_match_char,
+    _OP_check_char: (*_Assembler)._asm_OP_check_char,
+    _OP_load: (*_Assembler)._asm_OP_load,
+    _OP_save: (*_Assembler)._asm_OP_save,
+    _OP_drop: (*_Assembler)._asm_OP_drop,
+    _OP_drop_2: (*_Assembler)._asm_OP_drop_2,
+    _OP_recurse: (*_Assembler)._asm_OP_recurse,
+    _OP_goto: (*_Assembler)._asm_OP_goto,
+    _OP_switch: (*_Assembler)._asm_OP_switch,
+    _OP_check_char_0: (*_Assembler)._asm_OP_check_char_0,
+    _OP_dismatch_err: (*_Assembler)._asm_OP_dismatch_err,
+    _OP_go_skip: (*_Assembler)._asm_OP_go_skip,
+    _OP_add: (*_Assembler)._asm_OP_add,
+    _OP_check_empty: (*_Assembler)._asm_OP_check_empty,
+    _OP_debug: (*_Assembler)._asm_OP_debug,
 }
 
 func (self *_Assembler) _asm_OP_debug(_ *_Instr) {
-    self.Byte(0xcc)
+    self.Byte(0xcc)
 }
 
 func (self *_Assembler) instr(v *_Instr) {
-    if fn := _OpFuncTab[v.op()]; fn != nil {
-        fn(self, v)
-    } else {
-        panic(fmt.Sprintf("invalid opcode: %d", v.op()))
-    }
+    if fn := _OpFuncTab[v.op()]; fn != nil {
+        fn(self, v)
+    } else {
+        panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+    }
 }
 
 func (self *_Assembler) instrs() {
-    for i, v := range self.p {
-        self.Mark(i)
-        self.instr(&v)
-        self.debug_instr(i, &v)
-    }
+    for i, v := range self.p {
+        self.Mark(i)
+        self.instr(&v)
+        self.debug_instr(i, &v)
+    }
 }
 
 func (self *_Assembler) epilogue() {
-    self.Mark(len(self.p))
-    self.Emit("XORL", _EP, _EP) // XORL EP, EP
-    self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET
-    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
-    self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error
-    self.Link(_LB_error) // _error:
-    self.Emit("MOVQ", _EP, _CX) // MOVQ BX, CX
-    self.Emit("MOVQ", _ET, _BX) // MOVQ AX, BX
-    self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
-    self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP)
-    self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP)
-    self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP)
-    self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP)
-    self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
-    self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
-    self.Emit("RET") // RET
+    self.Mark(len(self.p))
+    self.Emit("XORL", _EP, _EP) // XORL EP, EP
+    self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET
+    self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+    self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error
+    self.Link(_LB_error) // _error:
+    self.Emit("MOVQ", _EP, _CX) // MOVQ BX, CX
+    self.Emit("MOVQ", _ET, _BX) // MOVQ AX, BX
+    self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
+    self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP)
+    self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP)
+    self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP)
+    self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP)
+    self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+    self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
+    self.Emit("RET") // RET
 }
 
 func (self *_Assembler) prologue() {
-    self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP
-    self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
-    self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
-    self.Emit("MOVQ", _AX, _ARG_sp) // MOVQ AX, s.p<>+0(FP)
-    self.Emit("MOVQ", _AX, _IP) // MOVQ AX, IP
-    self.Emit("MOVQ", _BX, _ARG_sl) // MOVQ BX, s.l<>+8(FP)
-    self.Emit("MOVQ", _BX, _IL) // MOVQ BX, IL
-    self.Emit("MOVQ", _CX, _ARG_ic) // MOVQ CX, ic<>+16(FP)
-    self.Emit("MOVQ", _CX, _IC) // MOVQ CX, IC
-    self.Emit("MOVQ", _DI, _ARG_vp) // MOVQ DI, vp<>+24(FP)
-    self.Emit("MOVQ", _DI, _VP) // MOVQ DI, VP
-    self.Emit("MOVQ", _SI, _ARG_sb) // MOVQ
SI, sb<>+32(FP) - self.Emit("MOVQ", _SI, _ST) // MOVQ SI, ST - self.Emit("MOVQ", _R8, _ARG_fv) // MOVQ R8, fv<>+40(FP) - self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP) - self.Emit("MOVQ", jit.Imm(0), _ARG_sv_n) // MOVQ $0, sv.n<>+56(FP) - self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP) - self.Emit("MOVQ", jit.Imm(0), _VAR_et) // MOVQ $0, et<>+120(FP) - // initialize digital buffer first - self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap - self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX - self.Emit("MOVQ", _AX, _VAR_st_Db) // MOVQ AX, ss.Dbuf + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.Emit("MOVQ", _AX, _ARG_sp) // MOVQ AX, s.p<>+0(FP) + self.Emit("MOVQ", _AX, _IP) // MOVQ AX, IP + self.Emit("MOVQ", _BX, _ARG_sl) // MOVQ BX, s.l<>+8(FP) + self.Emit("MOVQ", _BX, _IL) // MOVQ BX, IL + self.Emit("MOVQ", _CX, _ARG_ic) // MOVQ CX, ic<>+16(FP) + self.Emit("MOVQ", _CX, _IC) // MOVQ CX, IC + self.Emit("MOVQ", _DI, _ARG_vp) // MOVQ DI, vp<>+24(FP) + self.Emit("MOVQ", _DI, _VP) // MOVQ DI, VP + self.Emit("MOVQ", _SI, _ARG_sb) // MOVQ SI, sb<>+32(FP) + self.Emit("MOVQ", _SI, _ST) // MOVQ SI, ST + self.Emit("MOVQ", _R8, _ARG_fv) // MOVQ R8, fv<>+40(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sv_n) // MOVQ $0, sv.n<>+56(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP) + self.Emit("MOVQ", jit.Imm(0), _VAR_et) // MOVQ $0, et<>+120(FP) + // initialize digital buffer first + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_st_Db) // MOVQ AX, ss.Dbuf } /** Function Calling Helpers **/ var ( - _REG_go = []obj.Addr { _ST, _VP, _IP, _IL, _IC } - _REG_rt = []obj.Addr { _ST, _VP, _IP, _IL, _IC, _IL } + _REG_go = []obj.Addr{_ST, _VP, _IP, _IL, _IC} + _REG_rt = []obj.Addr{_ST, _VP, _IP, _IL, _IC, _IL} ) func (self *_Assembler) save(r ...obj.Addr) { - for i, v := range r { - if i > _FP_saves / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) - } - } + for i, v := range r { + if i > _FP_saves/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs+int64(i)*8)) + } + } } func (self *_Assembler) load(r ...obj.Addr) { - for i, v := range r { - if i > _FP_saves / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) - } - } + for i, v := range r { + if i > _FP_saves/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs+int64(i)*8), v) + } + } } func (self *_Assembler) call(fn obj.Addr) { - self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, R11 - self.Rjmp("CALL", _R9) // CALL R11 + self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, R11 + self.Rjmp("CALL", _R9) // CALL R11 } func (self *_Assembler) call_go(fn obj.Addr) { - self.save(_REG_go...) // SAVE $REG_go - self.call(fn) - self.load(_REG_go...) // LOAD $REG_go + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) + self.load(_REG_go...) 
// LOAD $REG_go } func (self *_Assembler) callc(fn obj.Addr) { - self.Emit("XCHGQ", _IP, _BP) - self.call(fn) - self.Emit("XCHGQ", _IP, _BP) + self.Emit("XCHGQ", _IP, _BP) + self.call(fn) + self.Emit("XCHGQ", _IP, _BP) } func (self *_Assembler) call_c(fn obj.Addr) { - self.Emit("XCHGQ", _IC, _BX) - self.callc(fn) - self.Emit("XCHGQ", _IC, _BX) + self.Emit("XCHGQ", _IC, _BX) + self.callc(fn) + self.Emit("XCHGQ", _IC, _BX) } func (self *_Assembler) call_sf(fn obj.Addr) { - self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI - self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) - self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI - self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX - self.Emit("MOVQ", _ARG_fv, _CX) - self.callc(fn) - self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX + self.Emit("MOVQ", _ARG_fv, _CX) + self.callc(fn) + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC } func (self *_Assembler) call_vf(fn obj.Addr) { - self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI - self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) - self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI - self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX - self.callc(fn) - self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX + self.callc(fn) + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC } /** Assembler Error Handlers **/ var ( - _F_convT64 = jit.Func(convT64) - _F_error_wrap = jit.Func(error_wrap) - _F_error_type = jit.Func(error_type) - _F_error_field = jit.Func(error_field) - _F_error_value = jit.Func(error_value) - _F_error_mismatch = jit.Func(error_mismatch) + _F_convT64 = jit.Func(convT64) + _F_error_wrap = jit.Func(error_wrap) + _F_error_type = jit.Func(error_type) + _F_error_field = jit.Func(error_field) + _F_error_value = jit.Func(error_value) + _F_error_mismatch = jit.Func(error_mismatch) ) var ( - _I_int8 , _T_int8 = rtype(reflect.TypeOf(int8(0))) - _I_int16 , _T_int16 = rtype(reflect.TypeOf(int16(0))) - _I_int32 , _T_int32 = rtype(reflect.TypeOf(int32(0))) - _I_uint8 , _T_uint8 = rtype(reflect.TypeOf(uint8(0))) - _I_uint16 , _T_uint16 = rtype(reflect.TypeOf(uint16(0))) - _I_uint32 , _T_uint32 = rtype(reflect.TypeOf(uint32(0))) - _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0))) + _I_int8, _T_int8 = rtype(reflect.TypeOf(int8(0))) + _I_int16, _T_int16 = rtype(reflect.TypeOf(int16(0))) + _I_int32, _T_int32 = rtype(reflect.TypeOf(int32(0))) + _I_uint8, _T_uint8 = rtype(reflect.TypeOf(uint8(0))) + _I_uint16, _T_uint16 = rtype(reflect.TypeOf(uint16(0))) + _I_uint32, _T_uint32 = rtype(reflect.TypeOf(uint32(0))) + _I_float32, _T_float32 = rtype(reflect.TypeOf(float32(0))) ) var ( - _T_error = rt.UnpackType(errorType) - _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError) + _T_error = rt.UnpackType(errorType) + _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError) ) var ( - _V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow)))) - _I_json_UnsupportedValueError = jit.Itab(_T_error, 
reflect.TypeOf(new(json.UnsupportedValueError))) - _I_json_MismatchTypeError = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError))) + _V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow)))) + _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError))) + _I_json_MismatchTypeError = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError))) ) func (self *_Assembler) type_error() { - self.Link(_LB_type_error) // _type_error: - self.call_go(_F_error_type) // CALL_GO error_type - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_type_error) // _type_error: + self.call_go(_F_error_type) // CALL_GO error_type + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) mismatch_error() { - self.Link(_LB_mismatch_error) // _type_error: - self.Emit("MOVQ", _VAR_et, _ET) // MOVQ _VAR_et, ET - self.Emit("MOVQ", _VAR_ic, _EP) // MOVQ _VAR_ic, EP - self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchType, CX - self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX - self.Sjmp("JE" , _LB_error) // JE _LB_error - self.Emit("MOVQ", _ARG_sp, _AX) - self.Emit("MOVQ", _ARG_sl, _BX) - self.Emit("MOVQ", _VAR_ic, _CX) - self.Emit("MOVQ", _VAR_et, _DI) - self.call_go(_F_error_mismatch) // CALL_GO error_type - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_mismatch_error) // _type_error: + self.Emit("MOVQ", _VAR_et, _ET) // MOVQ _VAR_et, ET + self.Emit("MOVQ", _VAR_ic, _EP) // MOVQ _VAR_ic, EP + self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchType, CX + self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX + self.Sjmp("JE", _LB_error) // JE _LB_error + self.Emit("MOVQ", _ARG_sp, _AX) + self.Emit("MOVQ", _ARG_sl, _BX) + self.Emit("MOVQ", _VAR_ic, _CX) + self.Emit("MOVQ", _VAR_et, _DI) + self.call_go(_F_error_mismatch) // CALL_GO error_type + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) field_error() { - self.Link(_LB_field_error) // _field_error: - self.Emit("MOVQ", _ARG_sv_p, _AX) // MOVQ sv.p, AX - self.Emit("MOVQ", _ARG_sv_n, _BX) // MOVQ sv.n, BX - self.call_go(_F_error_field) // CALL_GO error_field - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_field_error) // _field_error: + self.Emit("MOVQ", _ARG_sv_p, _AX) // MOVQ sv.p, AX + self.Emit("MOVQ", _ARG_sv_n, _BX) // MOVQ sv.n, BX + self.call_go(_F_error_field) // CALL_GO error_field + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) range_error() { - self.Link(_LB_range_error) // _range_error: - self.Emit("MOVQ", _ET, _CX) // MOVQ ET, CX - self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0 - self.Emit("MOVQ", _DI, _AX) // MOVQ DI, AX - self.Emit("MOVQ", _EP, _DI) // MOVQ EP, DI - self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX - self.call_go(_F_error_value) // CALL_GO error_value - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_range_error) // _range_error: + self.Emit("MOVQ", _ET, _CX) // MOVQ ET, CX + self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0 + self.Emit("MOVQ", _DI, _AX) // MOVQ DI, AX + self.Emit("MOVQ", _EP, _DI) // MOVQ EP, DI + self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX + self.call_go(_F_error_value) // CALL_GO error_value + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) stack_error() { - self.Link(_LB_stack_error) // _stack_error: - self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP - self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET - self.Sjmp("JMP" , _LB_error) // JMP 
_error + self.Link(_LB_stack_error) // _stack_error: + self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) base64_error() { - self.Link(_LB_base64_error) - self.Emit("NEGQ", _AX) // NEGQ AX - self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX - self.call_go(_F_convT64) // CALL_GO convT64 - self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP - self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_base64_error) + self.Emit("NEGQ", _AX) // NEGQ AX + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.call_go(_F_convT64) // CALL_GO convT64 + self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP + self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) parsing_error() { - self.Link(_LB_eof_error) // _eof_error: - self.Emit("MOVQ" , _IL, _IC) // MOVQ IL, IC - self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP - self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error - self.Link(_LB_unquote_error) // _unquote_error: - self.Emit("SUBQ" , _VAR_sr, _SI) // SUBQ sr, SI - self.Emit("SUBQ" , _SI, _IC) // SUBQ IL, IC - self.Link(_LB_parsing_error_v) // _parsing_error_v: - self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP - self.Emit("NEGQ" , _EP) // NEGQ EP - self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error - self.Link(_LB_char_m3_error) // _char_m3_error: - self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC - self.Link(_LB_char_m2_error) // _char_m2_error: - self.Emit("SUBQ" , jit.Imm(2), _IC) // SUBQ $2, IC - self.Sjmp("JMP" , _LB_char_0_error) // JMP _char_0_error - self.Link(_LB_im_error) // _im_error: - self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC) - self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error - self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX - self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC) - self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error - self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX - self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC) - self.Sjmp("JNE" , _LB_char_2_error) // JNE _char_2_error - self.Sjmp("JMP" , _LB_char_3_error) // JNE _char_3_error - self.Link(_LB_char_4_error) // _char_4_error: - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Link(_LB_char_3_error) // _char_3_error: - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Link(_LB_char_2_error) // _char_2_error: - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Link(_LB_char_1_error) // _char_1_error: - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Link(_LB_char_0_error) // _char_0_error: - self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP - self.Link(_LB_parsing_error) // _parsing_error: - self.Emit("MOVQ" , _EP, _DI) // MOVQ EP, DI - self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX - self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sl, BX - self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX - self.call_go(_F_error_wrap) // CALL_GO error_wrap - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_eof_error) // _eof_error: + self.Emit("MOVQ", _IL, _IC) // MOVQ IL, IC + self.Emit("MOVL", jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL 
${types.ERR_EOF}, EP + self.Sjmp("JMP", _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_unquote_error) // _unquote_error: + self.Emit("SUBQ", _VAR_sr, _SI) // SUBQ sr, SI + self.Emit("SUBQ", _SI, _IC) // SUBQ IL, IC + self.Link(_LB_parsing_error_v) // _parsing_error_v: + self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP + self.Emit("NEGQ", _EP) // NEGQ EP + self.Sjmp("JMP", _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_char_m3_error) // _char_m3_error: + self.Emit("SUBQ", jit.Imm(1), _IC) // SUBQ $1, IC + self.Link(_LB_char_m2_error) // _char_m2_error: + self.Emit("SUBQ", jit.Imm(2), _IC) // SUBQ $2, IC + self.Sjmp("JMP", _LB_char_0_error) // JMP _char_0_error + self.Link(_LB_im_error) // _im_error: + self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC) + self.Sjmp("JNE", _LB_char_0_error) // JNE _char_0_error + self.Emit("SHRL", jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC) + self.Sjmp("JNE", _LB_char_1_error) // JNE _char_1_error + self.Emit("SHRL", jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB", _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC) + self.Sjmp("JNE", _LB_char_2_error) // JNE _char_2_error + self.Sjmp("JMP", _LB_char_3_error) // JNE _char_3_error + self.Link(_LB_char_4_error) // _char_4_error: + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_3_error) // _char_3_error: + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_2_error) // _char_2_error: + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_1_error) // _char_1_error: + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_0_error) // _char_0_error: + self.Emit("MOVL", jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP + self.Link(_LB_parsing_error) // _parsing_error: + self.Emit("MOVQ", _EP, _DI) // MOVQ EP, DI + self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sl, BX + self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX + self.call_go(_F_error_wrap) // CALL_GO error_wrap + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) { - self.Emit("MOVQ", _IC, _VAR_ic) - self.Emit("MOVQ", jit.Type(p.vt()), _ET) - self.Emit("MOVQ", _ET, _VAR_et) + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", jit.Type(p.vt()), _ET) + self.Emit("MOVQ", _ET, _VAR_et) } func (self *_Assembler) _asm_OP_go_skip(p *_Instr) { - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Xref(p.vi(), 4) - // self.Byte(0xcc) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_one) // JMP _skip_one + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(p.vi(), 4) + // self.Byte(0xcc) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_one) // JMP _skip_one } func (self *_Assembler) skip_one() { - self.Link(_LB_skip_one) // _skip: - self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v - self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 - // self.Byte(0xcc) - self.Rjmp("JMP" , _R9) // JMP (R9) + self.Link(_LB_skip_one) // _skip: + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v + self.Emit("MOVQ", _VAR_pc, _R9) // MOVQ pc, R9 + // self.Byte(0xcc) 
+ self.Rjmp("JMP", _R9) // JMP (R9) } func (self *_Assembler) skip_key_value() { - self.Link(_LB_skip_key_value) // _skip: - // skip the key - self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v - // match char ':' - self.lspace("_global_1") - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':')) - self.Sjmp("JNE" , _LB_parsing_error_v) // JNE _parse_error_v - self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC - self.lspace("_global_2") - // skip the value - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v - // jump back to specified address - self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 - self.Rjmp("JMP" , _R9) // JMP (R9) + self.Link(_LB_skip_key_value) // _skip: + // skip the key + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v + // match char ':' + self.lspace("_global_1") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':')) + self.Sjmp("JNE", _LB_parsing_error_v) // JNE _parse_error_v + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.lspace("_global_2") + // skip the value + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v + // jump back to specified address + self.Emit("MOVQ", _VAR_pc, _R9) // MOVQ pc, R9 + self.Rjmp("JMP", _R9) // JMP (R9) } - /** Memory Management Routines **/ var ( - _T_byte = jit.Type(byteType) - _F_mallocgc = jit.Func(mallocgc) + _T_byte = jit.Type(byteType) + _F_mallocgc = jit.Func(mallocgc) ) func (self *_Assembler) malloc_AX(nb obj.Addr, ret obj.Addr) { - self.Emit("MOVQ", nb, _AX) // MOVQ ${nb}, AX - self.Emit("MOVQ", _T_byte, _BX) // MOVQ ${type(byte)}, BX - self.Emit("XORL", _CX, _CX) // XORL CX, CX - self.call_go(_F_mallocgc) // CALL_GO mallocgc - self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} + self.Emit("MOVQ", nb, _AX) // MOVQ ${nb}, AX + self.Emit("MOVQ", _T_byte, _BX) // MOVQ ${type(byte)}, BX + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} } func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) { - self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX - self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX - self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX - self.call_go(_F_mallocgc) // CALL_GO mallocgc - self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX + self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} } func (self *_Assembler) valloc_AX(vt reflect.Type) { - self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX - self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX - self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX - self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX + self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, 
CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc } func (self *_Assembler) vfollow(vt reflect.Type) { - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} - self.valloc_AX(vt) // VALLOC ${vt}, AX - self.WritePtrAX(1, jit.Ptr(_VP, 0), true) // MOVQ AX, (VP) - self.Link("_end_{n}") // _end_{n}: - self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP + self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_end_{n}") // JNZ _end_{n} + self.valloc_AX(vt) // VALLOC ${vt}, AX + self.WritePtrAX(1, jit.Ptr(_VP, 0), true) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP } /** Value Parsing Routines **/ var ( - _F_vstring = jit.Imm(int64(native.S_vstring)) - _F_vnumber = jit.Imm(int64(native.S_vnumber)) - _F_vsigned = jit.Imm(int64(native.S_vsigned)) - _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) + _F_vstring = jit.Imm(int64(native.S_vstring)) + _F_vnumber = jit.Imm(int64(native.S_vnumber)) + _F_vsigned = jit.Imm(int64(native.S_vsigned)) + _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) ) func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX - self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} - // try to skip the value - if vt != nil { - self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v - self.Emit("MOVQ", jit.Type(vt), _ET) - self.Emit("MOVQ", _ET, _VAR_et) - if pin2 != -1 { - self.Emit("SUBQ", jit.Imm(1), _BX) - self.Emit("MOVQ", _BX, _VAR_ic) - self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 - self.Xref(pin2, 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_key_value) - } else { - self.Emit("MOVQ", _BX, _VAR_ic) - self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref(pin, 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_one) - } - self.Link("_check_err_{n}") - } else { - self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v - } + self.Emit("MOVQ", _VAR_st_Vt, _AX) // MOVQ st.Vt, AX + self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} + // try to skip the value + if vt != nil { + self.Sjmp("JNS", "_check_err_{n}") // JNE _parsing_error_v + self.Emit("MOVQ", jit.Type(vt), _ET) + self.Emit("MOVQ", _ET, _VAR_et) + if pin2 != -1 { + self.Emit("SUBQ", jit.Imm(1), _BX) + self.Emit("MOVQ", _BX, _VAR_ic) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(pin2, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_key_value) + } else { + self.Emit("MOVQ", _BX, _VAR_ic) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref(pin, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_one) + } + self.Link("_check_err_{n}") + } else { + self.Sjmp("JS", _LB_parsing_error_v) // JNE _parsing_error_v + } } func (self *_Assembler) check_eof(d int64) { - if d == 1 { - self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error - } else { - self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX - self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , _LB_eof_error) // JA _eof_error - } + if d == 1 { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error + } else { + self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", _LB_eof_error) // JA _eof_error + } } - func 
(self *_Assembler) parse_string() { - self.Emit("MOVQ", _ARG_fv, _CX) - self.call_vf(_F_vstring) - self.check_err(nil, "", -1) + self.Emit("MOVQ", _ARG_fv, _CX) + self.call_vf(_F_vstring) + self.check_err(nil, "", -1) } func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BX) // save ic when call native func - self.call_vf(_F_vnumber) - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vnumber) + self.check_err(vt, pin, pin2) } func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BX) // save ic when call native func - self.call_vf(_F_vsigned) - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vsigned) + self.check_err(vt, pin, pin2) } func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) { - self.Emit("MOVQ", _IC, _BX) // save ic when call native func - self.call_vf(_F_vunsigned) - self.check_err(vt, pin, pin2) + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vunsigned) + self.check_err(vt, pin, pin2) } -// Pointer: DI, Size: SI, Return: R9 +// Pointer: DI, Size: SI, Return: R9 func (self *_Assembler) copy_string() { - self.Link("_copy_string") - self.Emit("MOVQ", _DI, _VAR_bs_p) - self.Emit("MOVQ", _SI, _VAR_bs_n) - self.Emit("MOVQ", _R9, _VAR_bs_LR) - self.malloc_AX(_SI, _ARG_sv_p) - self.Emit("MOVQ", _VAR_bs_p, _BX) - self.Emit("MOVQ", _VAR_bs_n, _CX) - self.call_go(_F_memmove) - self.Emit("MOVQ", _ARG_sv_p, _DI) - self.Emit("MOVQ", _VAR_bs_n, _SI) - self.Emit("MOVQ", _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_copy_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc_AX(_SI, _ARG_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _BX) + self.Emit("MOVQ", _VAR_bs_n, _CX) + self.call_go(_F_memmove) + self.Emit("MOVQ", _ARG_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } // Pointer: DI, Size: SI, Return: R9 func (self *_Assembler) escape_string() { - self.Link("_escape_string") - self.Emit("MOVQ" , _DI, _VAR_bs_p) - self.Emit("MOVQ" , _SI, _VAR_bs_n) - self.Emit("MOVQ" , _R9, _VAR_bs_LR) - self.malloc_AX(_SI, _DX) // MALLOC SI, DX - self.Emit("MOVQ" , _DX, _ARG_sv_p) - self.Emit("MOVQ" , _VAR_bs_p, _DI) - self.Emit("MOVQ" , _VAR_bs_n, _SI) - self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX - self.Emit("XORL" , _R8, _R8) // XORL R8, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv - self.Emit("SETCC", _R8) // SETCC R8 - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 - self.call_c(_F_unquote) // CALL unquote - self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI - self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error - self.Emit("MOVQ" , _AX, _SI) - self.Emit("MOVQ" , _ARG_sv_p, _DI) - self.Emit("MOVQ" , _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_escape_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc_AX(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ", _DX, _ARG_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("LEAQ", _VAR_sr, _CX) // LEAQ sr, CX + 
self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ", jit.Imm(1), _SI) // ADDQ $1, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ", _AX, _SI) + self.Emit("MOVQ", _ARG_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } func (self *_Assembler) escape_string_twice() { - self.Link("_escape_string_twice") - self.Emit("MOVQ" , _DI, _VAR_bs_p) - self.Emit("MOVQ" , _SI, _VAR_bs_n) - self.Emit("MOVQ" , _R9, _VAR_bs_LR) - self.malloc_AX(_SI, _DX) // MALLOC SI, DX - self.Emit("MOVQ" , _DX, _ARG_sv_p) - self.Emit("MOVQ" , _VAR_bs_p, _DI) - self.Emit("MOVQ" , _VAR_bs_n, _SI) - self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX - self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("SETCC", _AX) // SETCC AX - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX - self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8 - self.call_c(_F_unquote) // CALL unquote - self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI - self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error - self.Emit("MOVQ" , _AX, _SI) - self.Emit("MOVQ" , _ARG_sv_p, _DI) - self.Emit("MOVQ" , _VAR_bs_LR, _R9) - self.Rjmp("JMP", _R9) + self.Link("_escape_string_twice") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc_AX(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ", _DX, _ARG_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("LEAQ", _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("SETCC", _AX) // SETCC AX + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX + self.Emit("ORQ", _AX, _R8) // ORQ AX, R8 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ", jit.Imm(3), _SI) // ADDQ $3, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ", _AX, _SI) + self.Emit("MOVQ", _ARG_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) } /** Range Checking Routines **/ var ( - _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) - _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) + _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) + _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) ) var ( - _Vp_max_f32 = new(float64) - _Vp_min_f32 = new(float64) + _Vp_max_f32 = new(float64) + _Vp_min_f32 = new(float64) ) func init() { - *_Vp_max_f32 = math.MaxFloat32 - *_Vp_min_f32 = -math.MaxFloat32 + *_Vp_max_f32 = math.MaxFloat32 + *_Vp_min_f32 = -math.MaxFloat32 } func 
(self *_Assembler) range_single_X0() { - self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 - self.Emit("MOVQ" , _V_max_f32, _CX) // MOVQ _max_f32, CX - self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET - self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP - self.Emit("UCOMISD" , jit.Ptr(_CX, 0), _X0) // UCOMISD (CX), X0 - self.Sjmp("JA" , _LB_range_error) // JA _range_error - self.Emit("MOVQ" , _V_min_f32, _CX) // MOVQ _min_f32, CX - self.Emit("MOVSD" , jit.Ptr(_CX, 0), _X1) // MOVSD (CX), X1 - self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1 - self.Sjmp("JA" , _LB_range_error) // JA _range_error - self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 + self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVQ", _V_max_f32, _CX) // MOVQ _max_f32, CX + self.Emit("MOVQ", jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET + self.Emit("MOVQ", jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP + self.Emit("UCOMISD", jit.Ptr(_CX, 0), _X0) // UCOMISD (CX), X0 + self.Sjmp("JA", _LB_range_error) // JA _range_error + self.Emit("MOVQ", _V_min_f32, _CX) // MOVQ _min_f32, CX + self.Emit("MOVSD", jit.Ptr(_CX, 0), _X1) // MOVSD (CX), X1 + self.Emit("UCOMISD", _X0, _X1) // UCOMISD X0, X1 + self.Sjmp("JA", _LB_range_error) // JA _range_error + self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 } func (self *_Assembler) range_signed_CX(i *rt.GoItab, t *rt.GoType, a int64, b int64) { - self.Emit("MOVQ", _VAR_st_Iv, _CX) // MOVQ st.Iv, CX - self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET - self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP - self.Emit("CMPQ", _CX, jit.Imm(a)) // CMPQ CX, ${a} - self.Sjmp("JL" , _LB_range_error) // JL _range_error - self.Emit("CMPQ", _CX, jit.Imm(b)) // CMPQ CX, ${B} - self.Sjmp("JG" , _LB_range_error) // JG _range_error + self.Emit("MOVQ", _VAR_st_Iv, _CX) // MOVQ st.Iv, CX + self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("CMPQ", _CX, jit.Imm(a)) // CMPQ CX, ${a} + self.Sjmp("JL", _LB_range_error) // JL _range_error + self.Emit("CMPQ", _CX, jit.Imm(b)) // CMPQ CX, ${B} + self.Sjmp("JG", _LB_range_error) // JG _range_error } func (self *_Assembler) range_unsigned_CX(i *rt.GoItab, t *rt.GoType, v uint64) { - self.Emit("MOVQ" , _VAR_st_Iv, _CX) // MOVQ st.Iv, CX - self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET - self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP - self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX - self.Sjmp("JS" , _LB_range_error) // JS _range_error - self.Emit("CMPQ" , _CX, jit.Imm(int64(v))) // CMPQ CX, ${a} - self.Sjmp("JA" , _LB_range_error) // JA _range_error + self.Emit("MOVQ", _VAR_st_Iv, _CX) // MOVQ st.Iv, CX + self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JS", _LB_range_error) // JS _range_error + self.Emit("CMPQ", _CX, jit.Imm(int64(v))) // CMPQ CX, ${a} + self.Sjmp("JA", _LB_range_error) // JA _range_error } /** String Manipulating Routines **/ var ( - _F_unquote = jit.Imm(int64(native.S_unquote)) + _F_unquote = jit.Imm(int64(native.S_unquote)) ) func (self *_Assembler) slice_from(p obj.Addr, d int64) { - self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI - self.slice_from_r(_SI, d) // SLICE_R SI, ${d} + self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI + self.slice_from_r(_SI, d) // SLICE_R SI, ${d} } func (self *_Assembler) slice_from_r(p obj.Addr, d int64) { - 
self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI - self.Emit("NEGQ", p) // NEGQ ${p} - self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI + self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI + self.Emit("NEGQ", p) // NEGQ ${p} + self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI } func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) { - self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 - self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 - self.Sjmp("JE" , "_noescape_{n}") // JE _escape_{n} - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_unquote_once_write_{n}", 4) - self.Sjmp("JMP" , "_escape_string") - self.Link("_noescape_{n}") - if copy { - self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) - self.Sjmp("JNC", "_unquote_once_write_{n}") - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_unquote_once_write_{n}", 4) - self.Sjmp("JMP", "_copy_string") - } - self.Link("_unquote_once_write_{n}") - self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n} - if stack { - self.Emit("MOVQ", _DI, p) - } else { - self.WriteRecNotAX(10, _DI, p, false, false) - } + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE", "_noescape_{n}") // JE _escape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP", "_escape_string") + self.Link("_noescape_{n}") + if copy { + self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_once_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + } + self.Link("_unquote_once_write_{n}") + self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(10, _DI, p, false, false) + } } func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) { - self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 - self.Sjmp("JE" , _LB_eof_error) // JE _eof_error - self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\' - self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error - self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"' - self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error - self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3 - self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX - self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX - self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX - self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n} - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_unquote_twice_write_{n}", 4) - self.Sjmp("JMP" , "_escape_string_twice") - self.Link("_noescape_{n}") // _noescape_{n}: - self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) - self.Sjmp("JNC", "_unquote_twice_write_{n}") - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_unquote_twice_write_{n}", 4) - self.Sjmp("JMP", "_copy_string") - self.Link("_unquote_twice_write_{n}") - self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n} - if stack { - self.Emit("MOVQ", _DI, p) - } else { - self.WriteRecNotAX(12, _DI, p, false, false) - } - self.Link("_unquote_twice_end_{n}") + self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE", _LB_eof_error) // JE _eof_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\' + 
self.Sjmp("JNE", _LB_char_m3_error) // JNE _char_m3_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"' + self.Sjmp("JNE", _LB_char_m2_error) // JNE _char_m2_error + self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3 + self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX + self.Emit("ADDQ", _VAR_st_Iv, _AX) // ADDQ st.Iv, AX + self.Emit("CMPQ", _VAR_st_Ep, _AX) // CMPQ st.Ep, AX + self.Sjmp("JE", "_noescape_{n}") // JE _noescape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP", "_escape_string_twice") + self.Link("_noescape_{n}") // _noescape_{n}: + self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_twice_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_unquote_twice_write_{n}") + self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(12, _DI, p, false, false) + } + self.Link("_unquote_twice_end_{n}") } /** Memory Clearing Routines **/ var ( - _F_memclrHasPointers = jit.Func(memclrHasPointers) - _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers) + _F_memclrHasPointers = jit.Func(memclrHasPointers) + _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers) ) func (self *_Assembler) mem_clear_fn(ptrfree bool) { - if !ptrfree { - self.call_go(_F_memclrHasPointers) - } else { - self.call_go(_F_memclrNoHeapPointers) - } + if !ptrfree { + self.call_go(_F_memclrHasPointers) + } else { + self.call_go(_F_memclrNoHeapPointers) + } } func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) { - self.Emit("MOVQ", jit.Imm(size), _BX) // MOVQ ${size}, BX - self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX - self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX - self.Emit("ADDQ", _AX, _BX) // ADDQ AX, BX - self.Emit("MOVQ", _VP, _AX) // MOVQ VP, (SP) - self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers + self.Emit("MOVQ", jit.Imm(size), _BX) // MOVQ ${size}, BX + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX + self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX + self.Emit("ADDQ", _AX, _BX) // ADDQ AX, BX + self.Emit("MOVQ", _VP, _AX) // MOVQ VP, (SP) + self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers } /** Map Assigning Routines **/ var ( - _F_mapassign = jit.Func(mapassign) - _F_mapassign_fast32 = jit.Func(mapassign_fast32) - _F_mapassign_faststr = jit.Func(mapassign_faststr) - _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr) + _F_mapassign = jit.Func(mapassign) + _F_mapassign_fast32 = jit.Func(mapassign_fast32) + _F_mapassign_faststr = jit.Func(mapassign_faststr) + _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr) ) var ( - _F_decodeJsonUnmarshaler obj.Addr - _F_decodeTextUnmarshaler obj.Addr + _F_decodeJsonUnmarshaler obj.Addr + _F_decodeTextUnmarshaler obj.Addr ) func init() { - _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler) - _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler) + _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler) + _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler) } func (self *_Assembler) mapaccess_ptr(t reflect.Type) { - if rt.MapType(rt.UnpackType(t)).IndirectElem() { - self.vfollow(t.Elem()) - } + if rt.MapType(rt.UnpackType(t)).IndirectElem() { + self.vfollow(t.Elem()) + } 
} func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) { - self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX - self.mapassign_call_from_AX(t, _F_mapassign) // MAPASSIGN ${t}, mapassign + self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX + self.mapassign_call_from_AX(t, _F_mapassign) // MAPASSIGN ${t}, mapassign } func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) { - self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX - self.Emit("MOVQ", _VP, _BX) // MOVQ VP, BX - self.Emit("MOVQ", p, _CX) // MOVQ ${p}, CX - self.Emit("MOVQ", n, _DI) // MOVQ ${n}, DI - self.call_go(_F_mapassign_faststr) // CALL_GO ${fn} - self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP - self.mapaccess_ptr(t) + self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX + self.Emit("MOVQ", _VP, _BX) // MOVQ VP, BX + self.Emit("MOVQ", p, _CX) // MOVQ ${p}, CX + self.Emit("MOVQ", n, _DI) // MOVQ ${n}, DI + self.call_go(_F_mapassign_faststr) // CALL_GO ${fn} + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP + self.mapaccess_ptr(t) } func (self *_Assembler) mapassign_call_from_AX(t reflect.Type, fn obj.Addr) { - self.Emit("MOVQ", _AX, _CX) - self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX - self.Emit("MOVQ", _VP, _BX) // MOVQ VP, _BX - self.call_go(fn) // CALL_GO ${fn} - self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP + self.Emit("MOVQ", _AX, _CX) + self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX + self.Emit("MOVQ", _VP, _BX) // MOVQ VP, _BX + self.call_go(fn) // CALL_GO ${fn} + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP } func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) { - self.mapassign_call_from_AX(t, fn) - self.mapaccess_ptr(t) + self.mapassign_call_from_AX(t, fn) + self.mapaccess_ptr(t) } func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) { - pv := false - vk := t.Key() - tk := t.Key() - - /* deref pointer if needed */ - if vk.Kind() == reflect.Ptr { - pv = true - vk = vk.Elem() - } - - /* addressable value with pointer receiver */ - if addressable { - pv = false - tk = reflect.PtrTo(tk) - } - - /* allocate the key, and call the unmarshaler */ - self.valloc(vk, _BX) // VALLOC ${vk}, BX - // must spill vk pointer since next call_go may invoke GC - self.Emit("MOVQ" , _BX, _ARG_vk) - self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX - self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX - self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI - self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.Emit("MOVQ" , _ARG_vk, _AX) // MOVQ VAR.vk, AX - self.Emit("MOVQ", jit.Imm(0), _ARG_vk) - - /* select the correct assignment function */ - if !pv { - self.mapassign_call_from_AX(t, _F_mapassign) - } else { - self.mapassign_fastx(t, _F_mapassign_fast64ptr) - } + pv := false + vk := t.Key() + tk := t.Key() + + /* deref pointer if needed */ + if vk.Kind() == reflect.Ptr { + pv = true + vk = vk.Elem() + } + + /* addressable value with pointer receiver */ + if addressable { + pv = false + tk = reflect.PtrTo(tk) + } + + /* allocate the key, and call the unmarshaler */ + self.valloc(vk, _BX) // VALLOC ${vk}, BX + // must spill vk pointer since next call_go may invoke GC + self.Emit("MOVQ", _BX, _ARG_vk) + self.Emit("MOVQ", jit.Type(tk), _AX) // MOVQ ${tk}, AX + self.Emit("MOVQ", _ARG_sv_p, _CX) // MOVQ sv.p, CX + self.Emit("MOVQ", _ARG_sv_n, _DI) // MOVQ sv.n, DI + self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler + self.Emit("TESTQ", 
_ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.Emit("MOVQ", _ARG_vk, _AX) // MOVQ VAR.vk, AX + self.Emit("MOVQ", jit.Imm(0), _ARG_vk) + + /* select the correct assignment function */ + if !pv { + self.mapassign_call_from_AX(t, _F_mapassign) + } else { + self.mapassign_fastx(t, _F_mapassign_fast64ptr) + } } /** External Unmarshaler Routines **/ var ( - _F_skip_one = jit.Imm(int64(native.S_skip_one)) - _F_skip_array = jit.Imm(int64(native.S_skip_array)) - _F_skip_object = jit.Imm(int64(native.S_skip_object)) - _F_skip_number = jit.Imm(int64(native.S_skip_number)) + _F_skip_one = jit.Imm(int64(native.S_skip_one)) + _F_skip_array = jit.Imm(int64(native.S_skip_array)) + _F_skip_object = jit.Imm(int64(native.S_skip_object)) + _F_skip_number = jit.Imm(int64(native.S_skip_number)) ) func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) { - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v - self.slice_from_r(_AX, 0) // SLICE_R AX, $0 - self.Emit("MOVQ" , _DI, _ARG_sv_p) // MOVQ DI, sv.p - self.Emit("MOVQ" , _SI, _ARG_sv_n) // MOVQ SI, sv.n - self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref} + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v + self.slice_from_r(_AX, 0) // SLICE_R AX, $0 + self.Emit("MOVQ", _DI, _ARG_sv_p) // MOVQ DI, sv.p + self.Emit("MOVQ", _SI, _ARG_sv_n) // MOVQ SI, sv.n + self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref} } func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) { - self.parse_string() // PARSE STRING - self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref} + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref} } func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) { - pt := t - vk := t.Kind() - - /* allocate the field if needed */ - if deref && vk == reflect.Ptr { - self.Emit("MOVQ" , _VP, _BX) // MOVQ VP, BX - self.Emit("MOVQ" , jit.Ptr(_BX, 0), _BX) // MOVQ (BX), BX - self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX - self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n} - self.valloc(t.Elem(), _BX) // VALLOC ${t.Elem()}, BX - self.WriteRecNotAX(3, _BX, jit.Ptr(_VP, 0), false, false) // MOVQ BX, (VP) - self.Link("_deref_{n}") // _deref_{n}: - } else { - /* set value pointer */ - self.Emit("MOVQ", _VP, _BX) // MOVQ (VP), BX - } - - /* set value type */ - self.Emit("MOVQ", jit.Type(pt), _AX) // MOVQ ${pt}, AX - - /* set the source string and call the unmarshaler */ - self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX - self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI - self.call_go(fn) // CALL_GO ${fn} - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + pt := t + vk := t.Kind() + + /* allocate the field if needed */ + if deref && vk == reflect.Ptr { + self.Emit("MOVQ", _VP, _BX) // MOVQ VP, BX + self.Emit("MOVQ", jit.Ptr(_BX, 0), _BX) // MOVQ (BX), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JNZ", "_deref_{n}") // JNZ _deref_{n} + self.valloc(t.Elem(), _BX) // VALLOC 
${t.Elem()}, BX + self.WriteRecNotAX(3, _BX, jit.Ptr(_VP, 0), false, false) // MOVQ BX, (VP) + self.Link("_deref_{n}") // _deref_{n}: + } else { + /* set value pointer */ + self.Emit("MOVQ", _VP, _BX) // MOVQ (VP), BX + } + + /* set value type */ + self.Emit("MOVQ", jit.Type(pt), _AX) // MOVQ ${pt}, AX + + /* set the source string and call the unmarshaler */ + self.Emit("MOVQ", _ARG_sv_p, _CX) // MOVQ sv.p, CX + self.Emit("MOVQ", _ARG_sv_n, _DI) // MOVQ sv.n, DI + self.call_go(fn) // CALL_GO ${fn} + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error } /** Dynamic Decoding Routine **/ var ( - _F_decodeTypedPointer obj.Addr + _F_decodeTypedPointer obj.Addr ) func init() { - _F_decodeTypedPointer = jit.Func(decodeTypedPointer) + _F_decodeTypedPointer = jit.Func(decodeTypedPointer) } func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) { - self.Emit("MOVQ" , vp, _SI) // MOVQ ${vp}, SI - self.Emit("MOVQ" , vt, _DI) // MOVQ ${vt}, DI - self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX - self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sp, BX - self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX - self.Emit("MOVQ" , _ST, _R8) // MOVQ ST, R8 - self.Emit("MOVQ" , _ARG_fv, _R9) // MOVQ fv, R9 - self.save(_REG_rt...) - self.Emit("MOVQ", _F_decodeTypedPointer, _IL) // MOVQ ${fn}, R11 - self.Rjmp("CALL", _IL) // CALL R11 - self.load(_REG_rt...) - self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC - self.Emit("MOVQ" , _BX, _ET) // MOVQ BX, ET - self.Emit("MOVQ" , _CX, _EP) // MOVQ CX, EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n} - self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchTypeError, CX - self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX - self.Sjmp("JNE", _LB_error) // JNE LB_error - self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic - self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et - self.Link("_decode_dynamic_end_{n}") + self.Emit("MOVQ", vp, _SI) // MOVQ ${vp}, SI + self.Emit("MOVQ", vt, _DI) // MOVQ ${vt}, DI + self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sp, BX + self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX + self.Emit("MOVQ", _ST, _R8) // MOVQ ST, R8 + self.Emit("MOVQ", _ARG_fv, _R9) // MOVQ fv, R9 + self.save(_REG_rt...) + self.Emit("MOVQ", _F_decodeTypedPointer, _IL) // MOVQ ${fn}, R11 + self.Rjmp("CALL", _IL) // CALL R11 + self.load(_REG_rt...) 
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("MOVQ", _BX, _ET) // MOVQ BX, ET + self.Emit("MOVQ", _CX, _EP) // MOVQ CX, EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n} + self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchTypeError, CX + self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX + self.Sjmp("JNE", _LB_error) // JNE LB_error + self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic + self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et + self.Link("_decode_dynamic_end_{n}") } /** OpCode Assembler Functions **/ var ( - _F_memequal = jit.Func(memequal) - _F_memmove = jit.Func(memmove) - _F_growslice = jit.Func(growslice) - _F_makeslice = jit.Func(makeslice) - _F_makemap_small = jit.Func(makemap_small) - _F_mapassign_fast64 = jit.Func(mapassign_fast64) + _F_memequal = jit.Func(memequal) + _F_memmove = jit.Func(memmove) + _F_growslice = jit.Func(growslice) + _F_makeslice = jit.Func(makeslice) + _F_makemap_small = jit.Func(makemap_small) + _F_mapassign_fast64 = jit.Func(mapassign_fast64) ) var ( - _F_lspace = jit.Imm(int64(native.S_lspace)) - _F_strhash = jit.Imm(int64(caching.S_strhash)) + _F_lspace = jit.Imm(int64(native.S_lspace)) + _F_strhash = jit.Imm(int64(caching.S_strhash)) ) var ( - _F_b64decode = jit.Imm(int64(_subr__b64decode)) - _F_decodeValue = jit.Imm(int64(_subr_decode_value)) + _F_b64decode = jit.Imm(int64(_subr__b64decode)) + _F_decodeValue = jit.Imm(int64(_subr_decode_value)) ) var ( - _F_FieldMap_GetCaseInsensitive obj.Addr - _Empty_Slice = make([]byte, 0) - _Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr)) + _F_FieldMap_GetCaseInsensitive obj.Addr + _Empty_Slice = make([]byte, 0) + _Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr)) ) const ( - _MODE_AVX2 = 1 << 2 + _MODE_AVX2 = 1 << 2 ) const ( - _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID)) - _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name)) - _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash)) + _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID)) + _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name)) + _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash)) ) const ( - _Vk_Ptr = int64(reflect.Ptr) - _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags)) + _Vk_Ptr = int64(reflect.Ptr) + _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags)) ) func init() { - _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive) + _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive) } func (self *_Assembler) _asm_OP_any(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX - self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX - self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n} - self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP - self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n} - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX - self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX - self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX - self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} - self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n} - self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI - self.decode_dynamic(_AX, _DI) // DECODE AX, DI - self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n} - self.Link("_decode_{n}") // _decode_{n}: - self.Emit("MOVQ" 
, _ARG_fv, _DF) // MOVQ fv, DF - self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) - self.call(_F_decodeValue) // CALL decodeValue - self.Emit("MOVQ" , jit.Imm(0), jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) - self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP - self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error - self.Link("_decode_end_{n}") // _decode_end_{n}: + self.Emit("MOVQ", jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ", "_decode_{n}") // JZ _decode_{n} + self.Emit("CMPQ", _CX, _VP) // CMPQ CX, VP + self.Sjmp("JE", "_decode_{n}") // JE _decode_{n} + self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX + self.Emit("ANDL", jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL", _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE", "_decode_{n}") // JNE _decode_{n} + self.Emit("LEAQ", jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_AX, _DI) // DECODE AX, DI + self.Sjmp("JMP", "_decode_end_{n}") // JMP _decode_end_{n} + self.Link("_decode_{n}") // _decode_{n}: + self.Emit("MOVQ", _ARG_fv, _DF) // MOVQ fv, DF + self.Emit("MOVQ", _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) + self.call(_F_decodeValue) // CALL decodeValue + self.Emit("MOVQ", jit.Imm(0), jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) + self.Emit("TESTQ", _EP, _EP) // TESTQ EP, EP + self.Sjmp("JNZ", _LB_parsing_error) // JNZ _parsing_error + self.Link("_decode_end_{n}") // _decode_end_{n}: } func (self *_Assembler) _asm_OP_dyn(p *_Instr) { - self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET - self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0 - self.Sjmp("JE" , _LB_type_error) // JE _type_error - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _CX) // MOVQ (VP), CX - self.Emit("MOVQ" , jit.Ptr(_CX, 8), _CX) // MOVQ 8(CX), CX - self.Emit("MOVBLZX", jit.Ptr(_CX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(CX), DX - self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX - self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} - self.Sjmp("JNE" , _LB_type_error) // JNE _type_error - self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI - self.decode_dynamic(_CX, _DI) // DECODE CX, DI - self.Link("_decode_end_{n}") // _decode_end_{n}: + self.Emit("MOVQ", jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET + self.Emit("CMPQ", jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0 + self.Sjmp("JE", _LB_type_error) // JE _type_error + self.Emit("MOVQ", jit.Ptr(_VP, 0), _CX) // MOVQ (VP), CX + self.Emit("MOVQ", jit.Ptr(_CX, 8), _CX) // MOVQ 8(CX), CX + self.Emit("MOVBLZX", jit.Ptr(_CX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(CX), DX + self.Emit("ANDL", jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL", _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE", _LB_type_error) // JNE _type_error + self.Emit("LEAQ", jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_CX, _DI) // DECODE CX, DI + self.Link("_decode_end_{n}") // _decode_end_{n}: } func (self *_Assembler) _asm_OP_str(_ *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP) + self.parse_string() // PARSE STRING + self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP) } func (self *_Assembler) _asm_OP_bin(_ *_Instr) { - self.parse_string() // PARSE STRING - 
self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 - self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP) - self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) - self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI - self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI - self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) - self.malloc_AX(_SI, _SI) // MALLOC SI, SI + self.parse_string() // PARSE STRING + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("MOVQ", _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP) + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.Emit("SHRQ", jit.Imm(2), _SI) // SHRQ $2, SI + self.Emit("LEAQ", jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) + self.malloc_AX(_SI, _SI) // MALLOC SI, SI - // TODO: due to base64x's bug, only use AVX mode now - self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX + // TODO: due to base64x's bug, only use AVX mode now + self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX - /* call the decoder */ - self.Emit("XORL" , _DX, _DX) // XORL DX, DX - self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI + /* call the decoder */ + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.Emit("MOVQ", _VP, _DI) // MOVQ VP, DI - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R8) // MOVQ SI, (VP) - self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP) - self.Emit("MOVQ" , _R8, _SI) + self.Emit("MOVQ", jit.Ptr(_VP, 0), _R8) // MOVQ SI, (VP) + self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP) + self.Emit("MOVQ", _R8, _SI) - self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP) - self.call_c(_F_b64decode) // CALL b64decode - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_base64_error) // JS _base64_error - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP) + self.call_c(_F_b64decode) // CALL b64decode + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_base64_error) // JS _base64_error + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) } func (self *_Assembler) _asm_OP_bool(_ *_Instr) { - self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX - self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , _LB_eof_error) // JA _eof_error - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f' - self.Sjmp("JE" , "_false_{n}") // JE _false_{n} - self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX - self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) - self.Sjmp("JE" , "_bool_true_{n}") - // try to skip the value - self.Emit("MOVQ", _IC, _VAR_ic) - self.Emit("MOVQ", _T_bool, _ET) - self.Emit("MOVQ", _ET, _VAR_et) - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_end_{n}", 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_one) - - self.Link("_bool_true_{n}") - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP) - self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} - self.Link("_false_{n}") // _false_{n}: - self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX - self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC - self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , _LB_eof_error) // JA _eof_error - self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX - self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, 
(IP)(IC) - self.Sjmp("JNE" , _LB_im_error) // JNE _im_error - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) - self.Link("_end_{n}") // _end_{n}: + self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", _LB_eof_error) // JA _eof_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f' + self.Sjmp("JE", "_false_{n}") // JE _false_{n} + self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JE", "_bool_true_{n}") + // try to skip the value + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", _T_bool, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_one) + + self.Link("_bool_true_{n}") + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP) + self.Sjmp("JMP", "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", _LB_eof_error) // JA _eof_error + self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JNE", _LB_im_error) // JNE _im_error + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) + self.Link("_end_{n}") // _end_{n}: } func (self *_Assembler) _asm_OP_num(_ *_Instr) { - self.Emit("MOVQ", jit.Imm(0), _VAR_fl) - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) - self.Emit("MOVQ", _IC, _BX) - self.Sjmp("JNE", "_skip_number_{n}") - self.Emit("MOVQ", jit.Imm(1), _VAR_fl) - self.Emit("ADDQ", jit.Imm(1), _IC) - self.Link("_skip_number_{n}") - - /* call skip_number */ - self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI - self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) - self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI - self.callc(_F_skip_number) // CALL _F_skip_number - self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNS" , "_num_next_{n}") - - /* call skip one */ - self.Emit("MOVQ", _BX, _VAR_ic) - self.Emit("MOVQ", _T_number, _ET) - self.Emit("MOVQ", _ET, _VAR_et) - self.Byte(0x4c, 0x8d, 0x0d) - self.Sref("_num_end_{n}", 4) - self.Emit("MOVQ", _R9, _VAR_pc) - self.Sjmp("JMP" , _LB_skip_one) - - /* assgin string */ - self.Link("_num_next_{n}") - self.slice_from_r(_AX, 0) - self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) - self.Sjmp("JNC", "_num_write_{n}") - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 - self.Sref("_num_write_{n}", 4) - self.Sjmp("JMP", "_copy_string") - self.Link("_num_write_{n}") - self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) - self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false) - self.Emit("CMPQ", _VAR_fl, jit.Imm(1)) - self.Sjmp("JNE", "_num_end_{n}") - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) - self.Sjmp("JNE", _LB_char_0_error) - self.Emit("ADDQ", jit.Imm(1), _IC) - self.Link("_num_end_{n}") + self.Emit("MOVQ", jit.Imm(0), _VAR_fl) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Emit("MOVQ", _IC, 
_BX) + self.Sjmp("JNE", "_skip_number_{n}") + self.Emit("MOVQ", jit.Imm(1), _VAR_fl) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_skip_number_{n}") + + /* call skip_number */ + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.callc(_F_skip_number) // CALL _F_skip_number + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS", "_num_next_{n}") + + /* call skip one */ + self.Emit("MOVQ", _BX, _VAR_ic) + self.Emit("MOVQ", _T_number, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) + self.Sref("_num_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP", _LB_skip_one) + + /* assign string */ + self.Link("_num_next_{n}") + self.slice_from_r(_AX, 0) + self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_num_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_num_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_num_write_{n}") + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false) + self.Emit("CMPQ", _VAR_fl, jit.Imm(1)) + self.Sjmp("JNE", "_num_end_{n}") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Sjmp("JNE", _LB_char_0_error) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_num_end_{n}") } func (self *_Assembler) _asm_OP_i8(_ *_Instr) { - var pin = "_i8_end_{n}" - self.parse_signed(int8Type, pin, -1) // PARSE int8 - self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 - self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) - self.Link(pin) + var pin = "_i8_end_{n}" + self.parse_signed(int8Type, pin, -1) // PARSE int8 + self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_i16(_ *_Instr) { - var pin = "_i16_end_{n}" - self.parse_signed(int16Type, pin, -1) // PARSE int16 - self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 - self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) - self.Link(pin) + var pin = "_i16_end_{n}" + self.parse_signed(int16Type, pin, -1) // PARSE int16 + self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 + self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_i32(_ *_Instr) { - var pin = "_i32_end_{n}" - self.parse_signed(int32Type, pin, -1) // PARSE int32 - self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 - self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) - self.Link(pin) + var pin = "_i32_end_{n}" + self.parse_signed(int32Type, pin, -1) // PARSE int32 + self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_i64(_ *_Instr) { - var pin = "_i64_end_{n}" - self.parse_signed(int64Type, pin, -1) // PARSE int64 - self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX - self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) - self.Link(pin) + var pin = "_i64_end_{n}" + self.parse_signed(int64Type, pin, -1) // PARSE int64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) +
self.Link(pin) } func (self *_Assembler) _asm_OP_u8(_ *_Instr) { - var pin = "_u8_end_{n}" - self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8 - self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 - self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) - self.Link(pin) + var pin = "_u8_end_{n}" + self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8 + self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_u16(_ *_Instr) { - var pin = "_u16_end_{n}" - self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16 - self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 - self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) - self.Link(pin) + var pin = "_u16_end_{n}" + self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16 + self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_u32(_ *_Instr) { - var pin = "_u32_end_{n}" - self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32 - self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 - self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) - self.Link(pin) + var pin = "_u32_end_{n}" + self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32 + self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_u64(_ *_Instr) { - var pin = "_u64_end_{n}" - self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64 - self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX - self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) - self.Link(pin) + var pin = "_u64_end_{n}" + self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_f32(_ *_Instr) { - var pin = "_f32_end_{n}" - self.parse_number(float32Type, pin, -1) // PARSE NUMBER - self.range_single_X0() // RANGE float32 - self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP) - self.Link(pin) + var pin = "_f32_end_{n}" + self.parse_number(float32Type, pin, -1) // PARSE NUMBER + self.range_single_X0() // RANGE float32 + self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_f64(_ *_Instr) { - var pin = "_f64_end_{n}" - self.parse_number(float64Type, pin, -1) // PARSE NUMBER - self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 - self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP) - self.Link(pin) + var pin = "_f64_end_{n}" + self.parse_number(float64Type, pin, -1) // PARSE NUMBER + self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP) + self.Link(pin) } func (self *_Assembler) _asm_OP_unquote(_ *_Instr) { - self.check_eof(2) - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\' - self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"' - self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error - self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC - self.parse_string() // PARSE STRING - self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // 
UNQUOTE twice, (VP), 8(VP) + self.check_eof(2) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\' + self.Sjmp("JNE", _LB_char_0_error) // JNE _char_0_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"' + self.Sjmp("JNE", _LB_char_1_error) // JNE _char_1_error + self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC + self.parse_string() // PARSE STRING + self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP) } func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) { - self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) } func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) { - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) } func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) { - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVOU AX, 16(VP) + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 16)) // MOVOU AX, 16(VP) } func (self *_Assembler) _asm_OP_deref(p *_Instr) { - self.vfollow(p.vt()) + self.vfollow(p.vt()) } func (self *_Assembler) _asm_OP_index(p *_Instr) { - self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX - self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX + self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP } func (self *_Assembler) _asm_OP_is_null(p *_Instr) { - self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX - self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n} - self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" - self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC - self.Xjmp("JE" , p.vi()) // JE {p.vi()} - self.Link("_not_null_{n}") // _not_null_{n}: + self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", "_not_null_{n}") // JA _not_null_{n} + self.Emit("CMPL", jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE", p.vi()) // JE {p.vi()} + self.Link("_not_null_{n}") // _not_null_{n}: } func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) { - self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX - self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL - self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n} - self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" - self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n} - self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"' - self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC - self.Xjmp("JE" , p.vi()) // JE {p.vi()} - self.Link("_not_null_quote_{n}") // _not_null_quote_{n}: + self.Emit("LEAQ", jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA", "_not_null_quote_{n}") // JA _not_null_quote_{n} + 
self.Emit("CMPL", jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Sjmp("JNE", "_not_null_quote_{n}") // JNE _not_null_quote_{n} + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"' + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE", p.vi()) // JE {p.vi()} + self.Link("_not_null_quote_{n}") // _not_null_quote_{n}: } func (self *_Assembler) _asm_OP_map_init(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} - self.call_go(_F_makemap_small) // CALL_GO makemap_small - self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) - self.Link("_end_{n}") // _end_{n}: - self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP + self.Emit("MOVQ", jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_end_{n}") // JNZ _end_{n} + self.call_go(_F_makemap_small) // CALL_GO makemap_small + self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP } func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) { - self.parse_signed(int8Type, "", p.vi()) // PARSE int8 - self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv + self.parse_signed(int8Type, "", p.vi()) // PARSE int8 + self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv } func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) { - self.parse_signed(int16Type, "", p.vi()) // PARSE int16 - self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv + self.parse_signed(int16Type, "", p.vi()) // PARSE int16 + self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv } func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) { - self.parse_signed(int32Type, "", p.vi()) // PARSE int32 - self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv - } else { - self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX - self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32 - } + self.parse_signed(int32Type, "", p.vi()) // PARSE int32 + self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv + } else { + self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32 + } } func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) { - self.parse_signed(int64Type, "", p.vi()) // PARSE int64 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv - } else { - self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX - self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64 - } 
+ self.parse_signed(int64Type, "", p.vi()) // PARSE int64 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64 + } } func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) { - self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8 - self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv + self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8 + self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv } func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) { - self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16 - self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv + self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16 + self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv } func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) { - self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 - self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv - } else { - self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX - self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 - } + self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 + self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv + } else { + self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 + } } func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) { - self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 - self.match_char('"') - if vt := p.vt(); !mapfast(vt) { - self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv - } else { - self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX - self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 - } + self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 + self.match_char('"') + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 + } } func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) { - self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER - self.range_single_X0() // RANGE float32 - self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv + self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER + self.range_single_X0() // RANGE float32 + self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN 
${p.vt()}, mapassign, st.Dv } func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) { - self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER - self.match_char('"') - self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv + self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER + self.match_char('"') + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv } func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - if vt := p.vt(); !mapfast(vt) { - self.valloc(vt.Key(), _DI) - self.Emit("MOVOU", _ARG_sv, _X0) - self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) - self.mapassign_std(vt, jit.Ptr(_DI, 0)) // MAPASSIGN string, DI, SI - } else { - self.mapassign_str_fast(vt, _ARG_sv_p, _ARG_sv_n) // MAPASSIGN string, DI, SI - } + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + if vt := p.vt(); !mapfast(vt) { + self.valloc(vt.Key(), _DI) + self.Emit("MOVOU", _ARG_sv, _X0) + self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) + self.mapassign_std(vt, jit.Ptr(_DI, 0)) // MAPASSIGN string, DI, SI + } else { + self.mapassign_str_fast(vt, _ARG_sv_p, _ARG_sv_n) // MAPASSIGN string, DI, SI + } } func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false } func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) { - self.parse_string() // PARSE STRING - self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n - self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true } func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) { - self.call_sf(_F_skip_array) // CALL_SF skip_array - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_array) // CALL_SF skip_array + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_array_clear(p *_Instr) { - self.mem_clear_rem(p.i64(), true) + self.mem_clear_rem(p.i64(), true) } func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) { - self.mem_clear_rem(p.i64(), false) + self.mem_clear_rem(p.i64(), false) } func (self *_Assembler) _asm_OP_slice_init(p *_Instr) { - self.Emit("XORL" , _AX, _AX) // XORL AX, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) - self.Emit("MOVQ" , jit.Ptr(_VP, 16), _BX) // MOVQ 16(VP), BX - self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX - self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n} - self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX - self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) - self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, DX - self.call_go(_F_makeslice) // CALL_GO makeslice - self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) - self.Emit("XORL" , _AX, _AX) // XORL AX, 
AX - self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) - self.Link("_done_{n}") // _done_{n} + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ", jit.Ptr(_VP, 16), _BX) // MOVQ 16(VP), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JNZ", "_done_{n}") // JNZ _done_{n} + self.Emit("MOVQ", jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX + self.Emit("MOVQ", _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, DX + self.call_go(_F_makeslice) // CALL_GO makeslice + self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Link("_done_{n}") // _done_{n} } func (self *_Assembler) _asm_OP_check_empty(p *_Instr) { - rbracket := p.vb() - if rbracket == ']' { - self.check_eof(1) - self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']' - self.Sjmp("JNE" , "_not_empty_array_{n}") // JNE _not_empty_array_{n} - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP) - self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP) - self.Xjmp("JMP" , p.vi()) // JMP {p.vi()} - self.Link("_not_empty_array_{n}") - } else { - panic("only implement check empty array here!") - } + rbracket := p.vb() + if rbracket == ']' { + self.check_eof(1) + self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']' + self.Sjmp("JNE", "_not_empty_array_{n}") // JNE _not_empty_array_{n} + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP) + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP) + self.Xjmp("JMP", p.vi()) // JMP {p.vi()} + self.Link("_not_empty_array_{n}") + } else { + panic("only implement check empty array here!") + } } func (self *_Assembler) _asm_OP_slice_append(p *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX - self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) - self.Sjmp("JB" , "_index_{n}") // JB _index_{n} - self.Emit("MOVQ" , _AX, _SI) // MOVQ AX, SI - self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI - self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _BX) // MOVQ (VP), BX - self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX - self.Emit("MOVQ" , jit.Ptr(_VP, 16), _DI) // MOVQ 16(VP), DI - self.call_go(_F_growslice) // CALL_GO growslice - self.WritePtrAX(8, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) - self.Emit("MOVQ" , _BX, jit.Ptr(_VP, 8)) // MOVQ BX, 8(VP) - self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) - - // because growslice not zero memory {oldcap, newlen} when append et not has ptrdata. - // but we should zero it, avoid decode it as random values. 
- if rt.UnpackType(p.vt()).PtrData == 0 { - self.Emit("MOVQ" , _CX, _DI) // MOVQ CX, DI - self.Emit("SUBQ" , _BX, _DI) // MOVQ BX, DI - - self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) - self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP - self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX - self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX - self.From("MULQ" , _CX) // MULQ CX - self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP - - self.Emit("MOVQ" , _DI, _AX) // MOVQ SI, AX - self.From("MULQ" , _CX) // MULQ BX - self.Emit("MOVQ" , _AX, _BX) // ADDQ AX, BX - self.Emit("MOVQ" , _VP, _AX) // MOVQ VP, AX - self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap} - self.Sjmp("JMP", "_append_slice_end_{n}") - } - - self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX - self.Link("_index_{n}") // _index_{n}: - self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) - self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP - self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX - self.From("MULQ" , _CX) // MULQ CX - self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP - self.Link("_append_slice_end_{n}") + self.Emit("MOVQ", jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX + self.Emit("CMPQ", _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) + self.Sjmp("JB", "_index_{n}") // JB _index_{n} + self.Emit("MOVQ", _AX, _SI) // MOVQ AX, SI + self.Emit("SHLQ", jit.Imm(1), _SI) // SHLQ $1, SI + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.Emit("MOVQ", jit.Ptr(_VP, 0), _BX) // MOVQ (VP), BX + self.Emit("MOVQ", jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX + self.Emit("MOVQ", jit.Ptr(_VP, 16), _DI) // MOVQ 16(VP), DI + self.call_go(_F_growslice) // CALL_GO growslice + self.WritePtrAX(8, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Emit("MOVQ", _BX, jit.Ptr(_VP, 8)) // MOVQ BX, 8(VP) + self.Emit("MOVQ", _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + + // growslice does not zero the memory in {oldcap, newlen} when the element type has no pointer data, + // so we zero it ourselves to avoid decoding it as random values. 
+ if rt.UnpackType(p.vt()).PtrData == 0 { + self.Emit("MOVQ", _CX, _DI) // MOVQ CX, DI + self.Emit("SUBQ", _BX, _DI) // MOVQ BX, DI + + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP + self.Emit("MOVQ", jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.Emit("MOVQ", _BX, _AX) // MOVQ BX, AX + self.From("MULQ", _CX) // MULQ CX + self.Emit("ADDQ", _AX, _VP) // ADDQ AX, VP + + self.Emit("MOVQ", _DI, _AX) // MOVQ SI, AX + self.From("MULQ", _CX) // MULQ BX + self.Emit("MOVQ", _AX, _BX) // ADDQ AX, BX + self.Emit("MOVQ", _VP, _AX) // MOVQ VP, AX + self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap} + self.Sjmp("JMP", "_append_slice_end_{n}") + } + + self.Emit("MOVQ", _BX, _AX) // MOVQ BX, AX + self.Link("_index_{n}") // _index_{n}: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ", jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP + self.Emit("MOVQ", jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.From("MULQ", _CX) // MULQ CX + self.Emit("ADDQ", _AX, _VP) // ADDQ AX, VP + self.Link("_append_slice_end_{n}") } func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) { - self.call_sf(_F_skip_object) // CALL_SF skip_object - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_object) // CALL_SF skip_object + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_object_next(_ *_Instr) { - self.call_sf(_F_skip_one) // CALL_SF skip_one - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parse_error_v } func (self *_Assembler) _asm_OP_struct_field(p *_Instr) { - assert_eq(caching.FieldEntrySize, 32, "invalid field entry size") - self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX - self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr - self.parse_string() // PARSE STRING - self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, false) // UNQUOTE once, sv.p, sv.n - self.Emit("LEAQ" , _ARG_sv, _AX) // LEAQ sv, AX - self.Emit("XORL" , _BX, _BX) // XORL BX, BX - self.call_go(_F_strhash) // CALL_GO strhash - self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 - self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX - self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI - self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX - self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX - self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} - self.Link("_loop_{n}") // _loop_{n}: - self.Emit("XORL" , _DX, _DX) // XORL DX, DX - self.From("DIVQ" , _CX) // DIVQ CX - self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX - self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX - self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8 - self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8 - self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} - self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9 - self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) // MOVQ FieldEntry.Name+8(DI), DX - self.Emit("CMPQ" , _DX, _ARG_sv_n) // CMPQ DX, sv.n - self.Sjmp("JNE" , 
"_loop_{n}") // JNE _loop_{n} - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8 - self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX - self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX - self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI - self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8 - self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9 - self.Emit("MOVQ" , _ARG_sv_p, _AX) // MOVQ _VAR_sv_p, AX - self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX - self.Emit("MOVQ" , _CX, _BX) // MOVQ CX, 8(SP) - self.Emit("MOVQ" , _DX, _CX) // MOVQ DX, 16(SP) - self.call_go(_F_memequal) // CALL_GO memequal - self.Emit("MOVB" , _AX, _DX) // MOVB 24(SP), DX - self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX - self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX - self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI - self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9 - self.Emit("TESTB", _DX, _DX) // TESTB DX, DX - self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n} - self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8 - self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr - self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} - self.Link("_try_lowercase_{n}") // _try_lowercase_{n}: - self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX - self.Emit("MOVQ", _ARG_sv_p, _BX) // MOVQ sv, BX - self.Emit("MOVQ", _ARG_sv_n, _CX) // MOVQ sv, CX - self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive - self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n} - self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv - self.Sjmp("JC" , _LB_field_error) // JC _field_error - self.Link("_end_{n}") // _end_{n}: + assert_eq(caching.FieldEntrySize, 32, "invalid field entry size") + self.Emit("MOVQ", jit.Imm(-1), _AX) // MOVQ $-1, AX + self.Emit("MOVQ", _AX, _VAR_sr) // MOVQ AX, sr + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, false) // UNQUOTE once, sv.p, sv.n + self.Emit("LEAQ", _ARG_sv, _AX) // LEAQ sv, AX + self.Emit("XORL", _BX, _BX) // XORL BX, BX + self.call_go(_F_strhash) // CALL_GO strhash + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX + self.Emit("MOVQ", jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI + self.Emit("MOVQ", jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ", "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Link("_loop_{n}") // _loop_{n}: + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.From("DIVQ", _CX) // DIVQ CX + self.Emit("LEAQ", jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX + self.Emit("SHLQ", jit.Imm(5), _DX) // SHLQ $5, DX + self.Emit("LEAQ", jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI + self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8 + self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8 + self.Sjmp("JZ", "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Emit("CMPQ", _R8, _R9) // CMPQ R8, R9 + self.Sjmp("JNE", "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Name+8), _DX) // MOVQ FieldEntry.Name+8(DI), DX + self.Emit("CMPQ", _DX, _ARG_sv_n) // CMPQ DX, sv.n + self.Sjmp("JNE", "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ", jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8 + 
self.Emit("MOVQ", _AX, _VAR_ss_AX) // MOVQ AX, ss.AX + self.Emit("MOVQ", _CX, _VAR_ss_CX) // MOVQ CX, ss.CX + self.Emit("MOVQ", _SI, _VAR_ss_SI) // MOVQ SI, ss.SI + self.Emit("MOVQ", _R8, _VAR_ss_R8) // MOVQ R8, ss.R8 + self.Emit("MOVQ", _R9, _VAR_ss_R9) // MOVQ R9, ss.R9 + self.Emit("MOVQ", _ARG_sv_p, _AX) // MOVQ _VAR_sv_p, AX + self.Emit("MOVQ", jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX + self.Emit("MOVQ", _CX, _BX) // MOVQ CX, 8(SP) + self.Emit("MOVQ", _DX, _CX) // MOVQ DX, 16(SP) + self.call_go(_F_memequal) // CALL_GO memequal + self.Emit("MOVB", _AX, _DX) // MOVB 24(SP), DX + self.Emit("MOVQ", _VAR_ss_AX, _AX) // MOVQ ss.AX, AX + self.Emit("MOVQ", _VAR_ss_CX, _CX) // MOVQ ss.CX, CX + self.Emit("MOVQ", _VAR_ss_SI, _SI) // MOVQ ss.SI, SI + self.Emit("MOVQ", _VAR_ss_R9, _R9) // MOVQ ss.R9, R9 + self.Emit("TESTB", _DX, _DX) // TESTB DX, DX + self.Sjmp("JZ", "_loop_{n}") // JZ _loop_{n} + self.Emit("MOVQ", _VAR_ss_R8, _R8) // MOVQ ss.R8, R8 + self.Emit("MOVQ", _R8, _VAR_sr) // MOVQ R8, sr + self.Sjmp("JMP", "_end_{n}") // JMP _end_{n} + self.Link("_try_lowercase_{n}") // _try_lowercase_{n}: + self.Emit("MOVQ", jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX + self.Emit("MOVQ", _ARG_sv_p, _BX) // MOVQ sv, BX + self.Emit("MOVQ", _ARG_sv_n, _CX) // MOVQ sv, CX + self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive + self.Emit("MOVQ", _AX, _VAR_sr) // MOVQ AX, _VAR_sr + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS", "_end_{n}") // JNS _end_{n} + self.Emit("BTQ", jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv + self.Sjmp("JC", _LB_field_error) // JC _field_error + self.Link("_end_{n}") // _end_{n}: } func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) { - self.unmarshal_json(p.vt(), true) + self.unmarshal_json(p.vt(), true) } func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) { - self.unmarshal_json(p.vt(), false) + self.unmarshal_json(p.vt(), false) } func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) { - self.unmarshal_text(p.vt(), true) + self.unmarshal_text(p.vt(), true) } func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) { - self.unmarshal_text(p.vt(), false) + self.unmarshal_text(p.vt(), false) } func (self *_Assembler) _asm_OP_lspace(_ *_Instr) { - self.lspace("_{n}") + self.lspace("_{n}") } func (self *_Assembler) lspace(subfix string) { - var label = "_lspace" + subfix - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error - self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , label) // JA _nospace_{n} - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , label) // JNC _nospace_{n} - - /* test up to 4 characters */ - for i := 0; i < 3; i++ { - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , label) // JA _nospace_{n} - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , label) // JNC _nospace_{n} - } - - /* handle over to the native function */ - self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI - self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI - self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX - 
self.callc(_F_lspace) // CALL lspace - self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v - self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL - self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error - self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC - self.Link(label) // _nospace_{n}: + var label = "_lspace" + subfix + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ", jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", label) // JA _nospace_{n} + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", label) // JNC _nospace_{n} + + /* test up to 4 characters */ + for i := 0; i < 3; i++ { + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", label) // JA _nospace_{n} + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", label) // JNC _nospace_{n} + } + + /* hand over to the native function */ + self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX + self.callc(_F_lspace) // CALL lspace + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", _LB_parsing_error_v) // JS _parsing_error_v + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JAE", _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Link(label) // _nospace_{n}: } func (self *_Assembler) _asm_OP_match_char(p *_Instr) { - self.match_char(p.vb()) + self.match_char(p.vb()) } func (self *_Assembler) match_char(char byte) { - self.check_eof(1) - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()} - self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error - self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.check_eof(1) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()} + self.Sjmp("JNE", _LB_char_0_error) // JNE _char_0_error + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC } func (self *_Assembler) _asm_OP_check_char(p *_Instr) { - self.check_eof(1) - self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX - self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} - self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC - self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.check_eof(1) + self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE", p.vi()) // JE {p.vi()} } func (self *_Assembler) _asm_OP_check_char_0(p *_Instr) { - self.check_eof(1) - self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} - self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.check_eof(1) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Xjmp("JE", p.vi()) // JE {p.vi()} } func (self *_Assembler) _asm_OP_add(p *_Instr) { - self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC + self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, 
IC } func (self *_Assembler) _asm_OP_load(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP } func (self *_Assembler) _asm_OP_save(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX - self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes} - self.Sjmp("JAE" , _LB_stack_error) // JA _stack_error - self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX) - self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX - self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes} + self.Sjmp("JAE", _LB_stack_error) // JA _stack_error + self.WriteRecNotAX(0, _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX) + self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST) } func (self *_Assembler) _asm_OP_drop(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP - self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) - self.Emit("XORL", _BX, _BX) // XORL BX, BX - self.Emit("MOVQ", _BX, jit.Sib(_ST, _AX, 1, 8)) // MOVQ BX, 8(ST)(AX) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP + self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("XORL", _BX, _BX) // XORL BX, BX + self.Emit("MOVQ", _BX, jit.Sib(_ST, _AX, 1, 8)) // MOVQ BX, 8(ST)(AX) } func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("SUBQ" , jit.Imm(16), _AX) // SUBQ $16, AX - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP - self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ", jit.Imm(16), _AX) // SUBQ $16, AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP + self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) } func (self *_Assembler) _asm_OP_recurse(p *_Instr) { - self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX - self.decode_dynamic(_AX, _VP) // DECODE AX, VP + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.decode_dynamic(_AX, _VP) // DECODE AX, VP } func (self *_Assembler) _asm_OP_goto(p *_Instr) { - self.Xjmp("JMP", p.vi()) + self.Xjmp("JMP", p.vi()) } func (self *_Assembler) _asm_OP_switch(p *_Instr) { - self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX - self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())} - self.Sjmp("JAE" , "_default_{n}") // JAE _default_{n} + self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX + self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())} + self.Sjmp("JAE", "_default_{n}") // JAE _default_{n} - /* jump table selector */ - self.Byte(0x48, 
0x8d, 0x3d) // LEAQ ?(PC), DI - self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n} - self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX - self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX - self.Rjmp("JMP" , _AX) // JMP AX - self.Link("_switch_table_{n}") // _switch_table_{n}: + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n} + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP", _AX) // JMP AX + self.Link("_switch_table_{n}") // _switch_table_{n}: - /* generate the jump table */ - for i, v := range p.vs() { - self.Xref(v, int64(-i) * 4) - } + /* generate the jump table */ + for i, v := range p.vs() { + self.Xref(v, int64(-i)*4) + } - /* default case */ - self.Link("_default_{n}") - self.NOP() + /* default case */ + self.Link("_default_{n}") + self.NOP() } func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { - self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX)// MOVQ $(p2.op()), 16(SP) - self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), 8(SP) - self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), (SP) - self.call_go(_F_println) + self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX) // MOVQ $(p2.op()), 16(SP) + self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), 8(SP) + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), (SP) + self.call_go(_F_println) } //go:linkname _runtime_writeBarrier runtime.writeBarrier @@ -1939,54 +1935,54 @@ var _runtime_writeBarrier uintptr func gcWriteBarrierAX() var ( - _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) + _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) - _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) ) func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) { - self.Emit("MOVQ", _V_writeBarrier, _R9) - self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.call(_F_gcWriteBarrierAX) - if saveDI { - self.load(_DI) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", _AX, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) { - if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { - panic("rec contains AX!") - } - self.Emit("MOVQ", _V_writeBarrier, _R9) - self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - if saveAX { - self.Emit("XCHGQ", ptr, _AX) - } else { - self.Emit("MOVQ", ptr, _AX) - } - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.call(_F_gcWriteBarrierAX) - 
if saveDI { - self.load(_DI) - } - if saveAX { - self.Emit("XCHGQ", ptr, _AX) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") -} \ No newline at end of file + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } else { + self.Emit("MOVQ", ptr, _AX) + } + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/compiler.go b/vendor/github.com/bytedance/sonic/internal/decoder/compiler.go index 8f3905fca..04e9f5230 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/compiler.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/compiler.go @@ -17,1139 +17,1270 @@ package decoder import ( - `encoding/json` - `fmt` - `reflect` - `sort` - `strconv` - `strings` - `unsafe` - - `github.com/bytedance/sonic/internal/caching` - `github.com/bytedance/sonic/internal/resolver` - `github.com/bytedance/sonic/internal/rt` - `github.com/bytedance/sonic/option` + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "unsafe" + + "github.com/bytedance/sonic/internal/caching" + "github.com/bytedance/sonic/internal/resolver" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" ) type _Op uint8 const ( - _OP_any _Op = iota + 1 - _OP_dyn - _OP_str - _OP_bin - _OP_bool - _OP_num - _OP_i8 - _OP_i16 - _OP_i32 - _OP_i64 - _OP_u8 - _OP_u16 - _OP_u32 - _OP_u64 - _OP_f32 - _OP_f64 - _OP_unquote - _OP_nil_1 - _OP_nil_2 - _OP_nil_3 - _OP_deref - _OP_index - _OP_is_null - _OP_is_null_quote - _OP_map_init - _OP_map_key_i8 - _OP_map_key_i16 - _OP_map_key_i32 - _OP_map_key_i64 - _OP_map_key_u8 - _OP_map_key_u16 - _OP_map_key_u32 - _OP_map_key_u64 - _OP_map_key_f32 - _OP_map_key_f64 - _OP_map_key_str - _OP_map_key_utext - _OP_map_key_utext_p - _OP_array_skip - _OP_array_clear - _OP_array_clear_p - _OP_slice_init - _OP_slice_append - _OP_object_skip - _OP_object_next - _OP_struct_field - _OP_unmarshal - _OP_unmarshal_p - _OP_unmarshal_text - _OP_unmarshal_text_p - _OP_lspace - _OP_match_char - _OP_check_char - _OP_load - _OP_save - _OP_drop - _OP_drop_2 - _OP_recurse - _OP_goto - _OP_switch - _OP_check_char_0 - _OP_dismatch_err - _OP_go_skip - _OP_add - _OP_check_empty - _OP_debug + _OP_any _Op = iota + 1 + _OP_dyn + _OP_str + _OP_bin + _OP_bool + _OP_num + _OP_i8 + _OP_i16 + _OP_i32 + _OP_i64 + _OP_u8 + _OP_u16 + _OP_u32 + _OP_u64 + _OP_f32 + _OP_f64 + _OP_unquote + _OP_nil_1 + _OP_nil_2 + _OP_nil_3 + _OP_deref + _OP_index + _OP_is_null + _OP_is_null_quote + _OP_map_init + _OP_map_key_i8 + _OP_map_key_i16 + _OP_map_key_i32 + _OP_map_key_i64 + _OP_map_key_u8 + _OP_map_key_u16 + _OP_map_key_u32 + _OP_map_key_u64 + _OP_map_key_f32 + _OP_map_key_f64 + _OP_map_key_str + _OP_map_key_utext + _OP_map_key_utext_p + _OP_array_skip + _OP_array_clear + _OP_array_clear_p + 
_OP_slice_init + _OP_slice_append + _OP_object_skip + _OP_object_next + _OP_struct_field + _OP_unmarshal + _OP_unmarshal_p + _OP_unmarshal_text + _OP_unmarshal_text_p + _OP_lspace + _OP_match_char + _OP_check_char + _OP_load + _OP_save + _OP_drop + _OP_drop_2 + _OP_recurse + _OP_goto + _OP_switch + _OP_check_char_0 + _OP_dismatch_err + _OP_go_skip + _OP_add + _OP_check_empty + _OP_debug ) const ( - _INT_SIZE = 32 << (^uint(0) >> 63) - _PTR_SIZE = 32 << (^uintptr(0) >> 63) - _PTR_BYTE = unsafe.Sizeof(uintptr(0)) + _INT_SIZE = 32 << (^uint(0) >> 63) + _PTR_SIZE = 32 << (^uintptr(0) >> 63) + _PTR_BYTE = unsafe.Sizeof(uintptr(0)) ) const ( - _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions - _MAX_FIELDS = 50 // cutoff at 50 fields struct + _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions + _MAX_FIELDS = 50 // cutoff at 50 fields struct ) -var _OpNames = [256]string { - _OP_any : "any", - _OP_dyn : "dyn", - _OP_str : "str", - _OP_bin : "bin", - _OP_bool : "bool", - _OP_num : "num", - _OP_i8 : "i8", - _OP_i16 : "i16", - _OP_i32 : "i32", - _OP_i64 : "i64", - _OP_u8 : "u8", - _OP_u16 : "u16", - _OP_u32 : "u32", - _OP_u64 : "u64", - _OP_f32 : "f32", - _OP_f64 : "f64", - _OP_unquote : "unquote", - _OP_nil_1 : "nil_1", - _OP_nil_2 : "nil_2", - _OP_nil_3 : "nil_3", - _OP_deref : "deref", - _OP_index : "index", - _OP_is_null : "is_null", - _OP_is_null_quote : "is_null_quote", - _OP_map_init : "map_init", - _OP_map_key_i8 : "map_key_i8", - _OP_map_key_i16 : "map_key_i16", - _OP_map_key_i32 : "map_key_i32", - _OP_map_key_i64 : "map_key_i64", - _OP_map_key_u8 : "map_key_u8", - _OP_map_key_u16 : "map_key_u16", - _OP_map_key_u32 : "map_key_u32", - _OP_map_key_u64 : "map_key_u64", - _OP_map_key_f32 : "map_key_f32", - _OP_map_key_f64 : "map_key_f64", - _OP_map_key_str : "map_key_str", - _OP_map_key_utext : "map_key_utext", - _OP_map_key_utext_p : "map_key_utext_p", - _OP_array_skip : "array_skip", - _OP_slice_init : "slice_init", - _OP_slice_append : "slice_append", - _OP_object_skip : "object_skip", - _OP_object_next : "object_next", - _OP_struct_field : "struct_field", - _OP_unmarshal : "unmarshal", - _OP_unmarshal_p : "unmarshal_p", - _OP_unmarshal_text : "unmarshal_text", - _OP_unmarshal_text_p : "unmarshal_text_p", - _OP_lspace : "lspace", - _OP_match_char : "match_char", - _OP_check_char : "check_char", - _OP_load : "load", - _OP_save : "save", - _OP_drop : "drop", - _OP_drop_2 : "drop_2", - _OP_recurse : "recurse", - _OP_goto : "goto", - _OP_switch : "switch", - _OP_check_char_0 : "check_char_0", - _OP_dismatch_err : "dismatch_err", - _OP_add : "add", - _OP_go_skip : "go_skip", - _OP_check_empty : "check_empty", - _OP_debug : "debug", +var _OpNames = [256]string{ + _OP_any: "any", + _OP_dyn: "dyn", + _OP_str: "str", + _OP_bin: "bin", + _OP_bool: "bool", + _OP_num: "num", + _OP_i8: "i8", + _OP_i16: "i16", + _OP_i32: "i32", + _OP_i64: "i64", + _OP_u8: "u8", + _OP_u16: "u16", + _OP_u32: "u32", + _OP_u64: "u64", + _OP_f32: "f32", + _OP_f64: "f64", + _OP_unquote: "unquote", + _OP_nil_1: "nil_1", + _OP_nil_2: "nil_2", + _OP_nil_3: "nil_3", + _OP_deref: "deref", + _OP_index: "index", + _OP_is_null: "is_null", + _OP_is_null_quote: "is_null_quote", + _OP_map_init: "map_init", + _OP_map_key_i8: "map_key_i8", + _OP_map_key_i16: "map_key_i16", + _OP_map_key_i32: "map_key_i32", + _OP_map_key_i64: "map_key_i64", + _OP_map_key_u8: "map_key_u8", + _OP_map_key_u16: "map_key_u16", + _OP_map_key_u32: "map_key_u32", + _OP_map_key_u64: "map_key_u64", + _OP_map_key_f32: "map_key_f32", + _OP_map_key_f64: 
"map_key_f64", + _OP_map_key_str: "map_key_str", + _OP_map_key_utext: "map_key_utext", + _OP_map_key_utext_p: "map_key_utext_p", + _OP_array_skip: "array_skip", + _OP_slice_init: "slice_init", + _OP_slice_append: "slice_append", + _OP_object_skip: "object_skip", + _OP_object_next: "object_next", + _OP_struct_field: "struct_field", + _OP_unmarshal: "unmarshal", + _OP_unmarshal_p: "unmarshal_p", + _OP_unmarshal_text: "unmarshal_text", + _OP_unmarshal_text_p: "unmarshal_text_p", + _OP_lspace: "lspace", + _OP_match_char: "match_char", + _OP_check_char: "check_char", + _OP_load: "load", + _OP_save: "save", + _OP_drop: "drop", + _OP_drop_2: "drop_2", + _OP_recurse: "recurse", + _OP_goto: "goto", + _OP_switch: "switch", + _OP_check_char_0: "check_char_0", + _OP_dismatch_err: "dismatch_err", + _OP_add: "add", + _OP_go_skip: "go_skip", + _OP_check_empty: "check_empty", + _OP_debug: "debug", } func (self _Op) String() string { - if ret := _OpNames[self]; ret != "" { - return ret - } else { - return "<invalid>" - } + if ret := _OpNames[self]; ret != "" { + return ret + } else { + return "<invalid>" + } } func _OP_int() _Op { - switch _INT_SIZE { - case 32: return _OP_i32 - case 64: return _OP_i64 - default: panic("unsupported int size") - } + switch _INT_SIZE { + case 32: + return _OP_i32 + case 64: + return _OP_i64 + default: + panic("unsupported int size") + } } func _OP_uint() _Op { - switch _INT_SIZE { - case 32: return _OP_u32 - case 64: return _OP_u64 - default: panic("unsupported uint size") - } + switch _INT_SIZE { + case 32: + return _OP_u32 + case 64: + return _OP_u64 + default: + panic("unsupported uint size") + } } func _OP_uintptr() _Op { - switch _PTR_SIZE { - case 32: return _OP_u32 - case 64: return _OP_u64 - default: panic("unsupported pointer size") - } + switch _PTR_SIZE { + case 32: + return _OP_u32 + case 64: + return _OP_u64 + default: + panic("unsupported pointer size") + } } func _OP_map_key_int() _Op { - switch _INT_SIZE { - case 32: return _OP_map_key_i32 - case 64: return _OP_map_key_i64 - default: panic("unsupported int size") - } + switch _INT_SIZE { + case 32: + return _OP_map_key_i32 + case 64: + return _OP_map_key_i64 + default: + panic("unsupported int size") + } } func _OP_map_key_uint() _Op { - switch _INT_SIZE { - case 32: return _OP_map_key_u32 - case 64: return _OP_map_key_u64 - default: panic("unsupported uint size") - } + switch _INT_SIZE { + case 32: + return _OP_map_key_u32 + case 64: + return _OP_map_key_u64 + default: + panic("unsupported uint size") + } } func _OP_map_key_uintptr() _Op { - switch _PTR_SIZE { - case 32: return _OP_map_key_u32 - case 64: return _OP_map_key_u64 - default: panic("unsupported pointer size") - } + switch _PTR_SIZE { + case 32: + return _OP_map_key_u32 + case 64: + return _OP_map_key_u64 + default: + panic("unsupported pointer size") + } } type _Instr struct { - u uint64 // union {op: 8, vb: 8, vi: 48}, iv maybe int or len([]int) - p unsafe.Pointer // maybe GoSlice.Data, *GoType or *caching.FieldMap + u uint64 // union {op: 8, vb: 8, vi: 48}, iv maybe int or len([]int) + p unsafe.Pointer // maybe GoSlice.Data, *GoType or *caching.FieldMap } func packOp(op _Op) uint64 { - return uint64(op) << 56 + return uint64(op) << 56 } func newInsOp(op _Op) _Instr { - return _Instr{u: packOp(op)} + return _Instr{u: packOp(op)} } func newInsVi(op _Op, vi int) _Instr { - return _Instr{u: packOp(op) | rt.PackInt(vi)} + return _Instr{u: packOp(op) | rt.PackInt(vi)} } func newInsVb(op _Op, vb byte) _Instr { - return _Instr{u: packOp(op) | 
(uint64(vb) << 48)} + return _Instr{u: packOp(op) | (uint64(vb) << 48)} } func newInsVs(op _Op, vs []int) _Instr { - return _Instr { - u: packOp(op) | rt.PackInt(len(vs)), - p: (*rt.GoSlice)(unsafe.Pointer(&vs)).Ptr, - } + return _Instr{ + u: packOp(op) | rt.PackInt(len(vs)), + p: (*rt.GoSlice)(unsafe.Pointer(&vs)).Ptr, + } } func newInsVt(op _Op, vt reflect.Type) _Instr { - return _Instr { - u: packOp(op), - p: unsafe.Pointer(rt.UnpackType(vt)), - } + return _Instr{ + u: packOp(op), + p: unsafe.Pointer(rt.UnpackType(vt)), + } } func newInsVf(op _Op, vf *caching.FieldMap) _Instr { - return _Instr { - u: packOp(op), - p: unsafe.Pointer(vf), - } + return _Instr{ + u: packOp(op), + p: unsafe.Pointer(vf), + } } func (self _Instr) op() _Op { - return _Op(self.u >> 56) + return _Op(self.u >> 56) } func (self _Instr) vi() int { - return rt.UnpackInt(self.u) + return rt.UnpackInt(self.u) } func (self _Instr) vb() byte { - return byte(self.u >> 48) + return byte(self.u >> 48) } func (self _Instr) vs() (v []int) { - (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr = self.p - (*rt.GoSlice)(unsafe.Pointer(&v)).Cap = self.vi() - (*rt.GoSlice)(unsafe.Pointer(&v)).Len = self.vi() - return + (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr = self.p + (*rt.GoSlice)(unsafe.Pointer(&v)).Cap = self.vi() + (*rt.GoSlice)(unsafe.Pointer(&v)).Len = self.vi() + return } func (self _Instr) vf() *caching.FieldMap { - return (*caching.FieldMap)(self.p) + return (*caching.FieldMap)(self.p) } func (self _Instr) vk() reflect.Kind { - return (*rt.GoType)(self.p).Kind() + return (*rt.GoType)(self.p).Kind() } func (self _Instr) vt() reflect.Type { - return (*rt.GoType)(self.p).Pack() + return (*rt.GoType)(self.p).Pack() } func (self _Instr) i64() int64 { - return int64(self.vi()) + return int64(self.vi()) } func (self _Instr) vlen() int { - return int((*rt.GoType)(self.p).Size) + return int((*rt.GoType)(self.p).Size) } func (self _Instr) isBranch() bool { - switch self.op() { - case _OP_goto : fallthrough - case _OP_switch : fallthrough - case _OP_is_null : fallthrough - case _OP_is_null_quote : fallthrough - case _OP_check_char : return true - default : return false - } + switch self.op() { + case _OP_goto: + fallthrough + case _OP_switch: + fallthrough + case _OP_is_null: + fallthrough + case _OP_is_null_quote: + fallthrough + case _OP_check_char: + return true + default: + return false + } } func (self _Instr) disassemble() string { - switch self.op() { - case _OP_dyn : fallthrough - case _OP_deref : fallthrough - case _OP_map_key_i8 : fallthrough - case _OP_map_key_i16 : fallthrough - case _OP_map_key_i32 : fallthrough - case _OP_map_key_i64 : fallthrough - case _OP_map_key_u8 : fallthrough - case _OP_map_key_u16 : fallthrough - case _OP_map_key_u32 : fallthrough - case _OP_map_key_u64 : fallthrough - case _OP_map_key_f32 : fallthrough - case _OP_map_key_f64 : fallthrough - case _OP_map_key_str : fallthrough - case _OP_map_key_utext : fallthrough - case _OP_map_key_utext_p : fallthrough - case _OP_slice_init : fallthrough - case _OP_slice_append : fallthrough - case _OP_unmarshal : fallthrough - case _OP_unmarshal_p : fallthrough - case _OP_unmarshal_text : fallthrough - case _OP_unmarshal_text_p : fallthrough - case _OP_recurse : return fmt.Sprintf("%-18s%s", self.op(), self.vt()) - case _OP_goto : fallthrough - case _OP_is_null_quote : fallthrough - case _OP_is_null : return fmt.Sprintf("%-18sL_%d", self.op(), self.vi()) - case _OP_index : fallthrough - case _OP_array_clear : fallthrough - case _OP_array_clear_p : return 
fmt.Sprintf("%-18s%d", self.op(), self.vi()) - case _OP_switch : return fmt.Sprintf("%-18s%s", self.op(), self.formatSwitchLabels()) - case _OP_struct_field : return fmt.Sprintf("%-18s%s", self.op(), self.formatStructFields()) - case _OP_match_char : return fmt.Sprintf("%-18s%s", self.op(), strconv.QuoteRune(rune(self.vb()))) - case _OP_check_char : return fmt.Sprintf("%-18sL_%d, %s", self.op(), self.vi(), strconv.QuoteRune(rune(self.vb()))) - default : return self.op().String() - } + switch self.op() { + case _OP_dyn: + fallthrough + case _OP_deref: + fallthrough + case _OP_map_key_i8: + fallthrough + case _OP_map_key_i16: + fallthrough + case _OP_map_key_i32: + fallthrough + case _OP_map_key_i64: + fallthrough + case _OP_map_key_u8: + fallthrough + case _OP_map_key_u16: + fallthrough + case _OP_map_key_u32: + fallthrough + case _OP_map_key_u64: + fallthrough + case _OP_map_key_f32: + fallthrough + case _OP_map_key_f64: + fallthrough + case _OP_map_key_str: + fallthrough + case _OP_map_key_utext: + fallthrough + case _OP_map_key_utext_p: + fallthrough + case _OP_slice_init: + fallthrough + case _OP_slice_append: + fallthrough + case _OP_unmarshal: + fallthrough + case _OP_unmarshal_p: + fallthrough + case _OP_unmarshal_text: + fallthrough + case _OP_unmarshal_text_p: + fallthrough + case _OP_recurse: + return fmt.Sprintf("%-18s%s", self.op(), self.vt()) + case _OP_goto: + fallthrough + case _OP_is_null_quote: + fallthrough + case _OP_is_null: + return fmt.Sprintf("%-18sL_%d", self.op(), self.vi()) + case _OP_index: + fallthrough + case _OP_array_clear: + fallthrough + case _OP_array_clear_p: + return fmt.Sprintf("%-18s%d", self.op(), self.vi()) + case _OP_switch: + return fmt.Sprintf("%-18s%s", self.op(), self.formatSwitchLabels()) + case _OP_struct_field: + return fmt.Sprintf("%-18s%s", self.op(), self.formatStructFields()) + case _OP_match_char: + return fmt.Sprintf("%-18s%s", self.op(), strconv.QuoteRune(rune(self.vb()))) + case _OP_check_char: + return fmt.Sprintf("%-18sL_%d, %s", self.op(), self.vi(), strconv.QuoteRune(rune(self.vb()))) + default: + return self.op().String() + } } func (self _Instr) formatSwitchLabels() string { - var i int - var v int - var m []string + var i int + var v int + var m []string - /* format each label */ - for i, v = range self.vs() { - m = append(m, fmt.Sprintf("%d=L_%d", i, v)) - } + /* format each label */ + for i, v = range self.vs() { + m = append(m, fmt.Sprintf("%d=L_%d", i, v)) + } - /* join them with "," */ - return strings.Join(m, ", ") + /* join them with "," */ + return strings.Join(m, ", ") } func (self _Instr) formatStructFields() string { - var i uint64 - var r []string - var m []struct{i int; n string} - - /* extract all the fields */ - for i = 0; i < self.vf().N; i++ { - if v := self.vf().At(i); v.Hash != 0 { - m = append(m, struct{i int; n string}{i: v.ID, n: v.Name}) - } - } - - /* sort by field name */ - sort.Slice(m, func(i, j int) bool { - return m[i].n < m[j].n - }) - - /* format each field */ - for _, v := range m { - r = append(r, fmt.Sprintf("%s=%d", v.n, v.i)) - } - - /* join them with "," */ - return strings.Join(r, ", ") + var i uint64 + var r []string + var m []struct { + i int + n string + } + + /* extract all the fields */ + for i = 0; i < self.vf().N; i++ { + if v := self.vf().At(i); v.Hash != 0 { + m = append(m, struct { + i int + n string + }{i: v.ID, n: v.Name}) + } + } + + /* sort by field name */ + sort.Slice(m, func(i, j int) bool { + return m[i].n < m[j].n + }) + + /* format each field */ + for _, v := range m { + 
r = append(r, fmt.Sprintf("%s=%d", v.n, v.i)) + } + + /* join them with "," */ + return strings.Join(r, ", ") } type ( - _Program []_Instr + _Program []_Instr ) func (self _Program) pc() int { - return len(self) + return len(self) } func (self _Program) tag(n int) { - if n >= _MaxStack { - panic("type nesting too deep") - } + if n >= _MaxStack { + panic("type nesting too deep") + } } func (self _Program) pin(i int) { - v := &self[i] - v.u &= 0xffff000000000000 - v.u |= rt.PackInt(self.pc()) + v := &self[i] + v.u &= 0xffff000000000000 + v.u |= rt.PackInt(self.pc()) } func (self _Program) rel(v []int) { - for _, i := range v { - self.pin(i) - } + for _, i := range v { + self.pin(i) + } } func (self *_Program) add(op _Op) { - *self = append(*self, newInsOp(op)) + *self = append(*self, newInsOp(op)) } func (self *_Program) int(op _Op, vi int) { - *self = append(*self, newInsVi(op, vi)) + *self = append(*self, newInsVi(op, vi)) } func (self *_Program) chr(op _Op, vb byte) { - *self = append(*self, newInsVb(op, vb)) + *self = append(*self, newInsVb(op, vb)) } func (self *_Program) tab(op _Op, vs []int) { - *self = append(*self, newInsVs(op, vs)) + *self = append(*self, newInsVs(op, vs)) } func (self *_Program) rtt(op _Op, vt reflect.Type) { - *self = append(*self, newInsVt(op, vt)) + *self = append(*self, newInsVt(op, vt)) } func (self *_Program) fmv(op _Op, vf *caching.FieldMap) { - *self = append(*self, newInsVf(op, vf)) + *self = append(*self, newInsVf(op, vf)) } func (self _Program) disassemble() string { - nb := len(self) - tab := make([]bool, nb + 1) - ret := make([]string, 0, nb + 1) - - /* prescan to get all the labels */ - for _, ins := range self { - if ins.isBranch() { - if ins.op() != _OP_switch { - tab[ins.vi()] = true - } else { - for _, v := range ins.vs() { - tab[v] = true - } - } - } - } - - /* disassemble each instruction */ - for i, ins := range self { - if !tab[i] { - ret = append(ret, "\t" + ins.disassemble()) - } else { - ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) - } - } - - /* add the last label, if needed */ - if tab[nb] { - ret = append(ret, fmt.Sprintf("L_%d:", nb)) - } - - /* add an "end" indicator, and join all the strings */ - return strings.Join(append(ret, "\tend"), "\n") + nb := len(self) + tab := make([]bool, nb+1) + ret := make([]string, 0, nb+1) + + /* prescan to get all the labels */ + for _, ins := range self { + if ins.isBranch() { + if ins.op() != _OP_switch { + tab[ins.vi()] = true + } else { + for _, v := range ins.vs() { + tab[v] = true + } + } + } + } + + /* disassemble each instruction */ + for i, ins := range self { + if !tab[i] { + ret = append(ret, "\t"+ins.disassemble()) + } else { + ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) + } + } + + /* add the last label, if needed */ + if tab[nb] { + ret = append(ret, fmt.Sprintf("L_%d:", nb)) + } + + /* add an "end" indicator, and join all the strings */ + return strings.Join(append(ret, "\tend"), "\n") } type _Compiler struct { - opts option.CompileOptions - tab map[reflect.Type]bool - rec map[reflect.Type]bool + opts option.CompileOptions + tab map[reflect.Type]bool + rec map[reflect.Type]bool } func newCompiler() *_Compiler { - return &_Compiler { - opts: option.DefaultCompileOptions(), - tab: map[reflect.Type]bool{}, - rec: map[reflect.Type]bool{}, - } + return &_Compiler{ + opts: option.DefaultCompileOptions(), + tab: map[reflect.Type]bool{}, + rec: map[reflect.Type]bool{}, + } } func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler { - 
self.opts = opts - return self + self.opts = opts + return self } func (self *_Compiler) rescue(ep *error) { - if val := recover(); val != nil { - if err, ok := val.(error); ok { - *ep = err - } else { - panic(val) - } - } + if val := recover(); val != nil { + if err, ok := val.(error); ok { + *ep = err + } else { + panic(val) + } + } } func (self *_Compiler) compile(vt reflect.Type) (ret _Program, err error) { - defer self.rescue(&err) - self.compileOne(&ret, 0, vt) - return + defer self.rescue(&err) + self.compileOne(&ret, 0, vt) + return } func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type) { - /* check for recursive nesting */ - ok := self.tab[vt] - if ok { - p.rtt(_OP_recurse, vt) - return - } - - pt := reflect.PtrTo(vt) - - /* check for `json.Unmarshaler` with pointer receiver */ - if pt.Implements(jsonUnmarshalerType) { - p.rtt(_OP_unmarshal_p, pt) - return - } - - /* check for `json.Unmarshaler` */ - if vt.Implements(jsonUnmarshalerType) { - p.add(_OP_lspace) - self.compileUnmarshalJson(p, vt) - return - } - - /* check for `encoding.TextMarshaler` with pointer receiver */ - if pt.Implements(encodingTextUnmarshalerType) { - p.add(_OP_lspace) - self.compileUnmarshalTextPtr(p, pt) - return - } - - /* check for `encoding.TextUnmarshaler` */ - if vt.Implements(encodingTextUnmarshalerType) { - p.add(_OP_lspace) - self.compileUnmarshalText(p, vt) - return - } - - /* enter the recursion */ - p.add(_OP_lspace) - self.tab[vt] = true - self.compileOps(p, sp, vt) - delete(self.tab, vt) + /* check for recursive nesting */ + ok := self.tab[vt] + if ok { + p.rtt(_OP_recurse, vt) + return + } + + pt := reflect.PtrTo(vt) + + /* check for `json.Unmarshaler` with pointer receiver */ + if pt.Implements(jsonUnmarshalerType) { + p.rtt(_OP_unmarshal_p, pt) + return + } + + /* check for `json.Unmarshaler` */ + if vt.Implements(jsonUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalJson(p, vt) + return + } + + /* check for `encoding.TextMarshaler` with pointer receiver */ + if pt.Implements(encodingTextUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalTextPtr(p, pt) + return + } + + /* check for `encoding.TextUnmarshaler` */ + if vt.Implements(encodingTextUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalText(p, vt) + return + } + + /* enter the recursion */ + p.add(_OP_lspace) + self.tab[vt] = true + self.compileOps(p, sp, vt) + delete(self.tab, vt) } func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) { - switch vt.Kind() { - case reflect.Bool : self.compilePrimitive (vt, p, _OP_bool) - case reflect.Int : self.compilePrimitive (vt, p, _OP_int()) - case reflect.Int8 : self.compilePrimitive (vt, p, _OP_i8) - case reflect.Int16 : self.compilePrimitive (vt, p, _OP_i16) - case reflect.Int32 : self.compilePrimitive (vt, p, _OP_i32) - case reflect.Int64 : self.compilePrimitive (vt, p, _OP_i64) - case reflect.Uint : self.compilePrimitive (vt, p, _OP_uint()) - case reflect.Uint8 : self.compilePrimitive (vt, p, _OP_u8) - case reflect.Uint16 : self.compilePrimitive (vt, p, _OP_u16) - case reflect.Uint32 : self.compilePrimitive (vt, p, _OP_u32) - case reflect.Uint64 : self.compilePrimitive (vt, p, _OP_u64) - case reflect.Uintptr : self.compilePrimitive (vt, p, _OP_uintptr()) - case reflect.Float32 : self.compilePrimitive (vt, p, _OP_f32) - case reflect.Float64 : self.compilePrimitive (vt, p, _OP_f64) - case reflect.String : self.compileString (p, vt) - case reflect.Array : self.compileArray (p, sp, vt) - case reflect.Interface : 
self.compileInterface (p, vt) - case reflect.Map : self.compileMap (p, sp, vt) - case reflect.Ptr : self.compilePtr (p, sp, vt) - case reflect.Slice : self.compileSlice (p, sp, vt) - case reflect.Struct : self.compileStruct (p, sp, vt) - default : panic (&json.UnmarshalTypeError{Type: vt}) - } + switch vt.Kind() { + case reflect.Bool: + self.compilePrimitive(vt, p, _OP_bool) + case reflect.Int: + self.compilePrimitive(vt, p, _OP_int()) + case reflect.Int8: + self.compilePrimitive(vt, p, _OP_i8) + case reflect.Int16: + self.compilePrimitive(vt, p, _OP_i16) + case reflect.Int32: + self.compilePrimitive(vt, p, _OP_i32) + case reflect.Int64: + self.compilePrimitive(vt, p, _OP_i64) + case reflect.Uint: + self.compilePrimitive(vt, p, _OP_uint()) + case reflect.Uint8: + self.compilePrimitive(vt, p, _OP_u8) + case reflect.Uint16: + self.compilePrimitive(vt, p, _OP_u16) + case reflect.Uint32: + self.compilePrimitive(vt, p, _OP_u32) + case reflect.Uint64: + self.compilePrimitive(vt, p, _OP_u64) + case reflect.Uintptr: + self.compilePrimitive(vt, p, _OP_uintptr()) + case reflect.Float32: + self.compilePrimitive(vt, p, _OP_f32) + case reflect.Float64: + self.compilePrimitive(vt, p, _OP_f64) + case reflect.String: + self.compileString(p, vt) + case reflect.Array: + self.compileArray(p, sp, vt) + case reflect.Interface: + self.compileInterface(p, vt) + case reflect.Map: + self.compileMap(p, sp, vt) + case reflect.Ptr: + self.compilePtr(p, sp, vt) + case reflect.Slice: + self.compileSlice(p, sp, vt) + case reflect.Struct: + self.compileStruct(p, sp, vt) + default: + panic(&json.UnmarshalTypeError{Type: vt}) + } } func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) { - if reflect.PtrTo(vt.Key()).Implements(encodingTextUnmarshalerType) { - self.compileMapOp(p, sp, vt, _OP_map_key_utext_p) - } else if vt.Key().Implements(encodingTextUnmarshalerType) { - self.compileMapOp(p, sp, vt, _OP_map_key_utext) - } else { - self.compileMapUt(p, sp, vt) - } + if reflect.PtrTo(vt.Key()).Implements(encodingTextUnmarshalerType) { + self.compileMapOp(p, sp, vt, _OP_map_key_utext_p) + } else if vt.Key().Implements(encodingTextUnmarshalerType) { + self.compileMapOp(p, sp, vt, _OP_map_key_utext) + } else { + self.compileMapUt(p, sp, vt) + } } func (self *_Compiler) compileMapUt(p *_Program, sp int, vt reflect.Type) { - switch vt.Key().Kind() { - case reflect.Int : self.compileMapOp(p, sp, vt, _OP_map_key_int()) - case reflect.Int8 : self.compileMapOp(p, sp, vt, _OP_map_key_i8) - case reflect.Int16 : self.compileMapOp(p, sp, vt, _OP_map_key_i16) - case reflect.Int32 : self.compileMapOp(p, sp, vt, _OP_map_key_i32) - case reflect.Int64 : self.compileMapOp(p, sp, vt, _OP_map_key_i64) - case reflect.Uint : self.compileMapOp(p, sp, vt, _OP_map_key_uint()) - case reflect.Uint8 : self.compileMapOp(p, sp, vt, _OP_map_key_u8) - case reflect.Uint16 : self.compileMapOp(p, sp, vt, _OP_map_key_u16) - case reflect.Uint32 : self.compileMapOp(p, sp, vt, _OP_map_key_u32) - case reflect.Uint64 : self.compileMapOp(p, sp, vt, _OP_map_key_u64) - case reflect.Uintptr : self.compileMapOp(p, sp, vt, _OP_map_key_uintptr()) - case reflect.Float32 : self.compileMapOp(p, sp, vt, _OP_map_key_f32) - case reflect.Float64 : self.compileMapOp(p, sp, vt, _OP_map_key_f64) - case reflect.String : self.compileMapOp(p, sp, vt, _OP_map_key_str) - default : panic(&json.UnmarshalTypeError{Type: vt}) - } + switch vt.Key().Kind() { + case reflect.Int: + self.compileMapOp(p, sp, vt, _OP_map_key_int()) + case reflect.Int8: + self.compileMapOp(p, sp, 
vt, _OP_map_key_i8) + case reflect.Int16: + self.compileMapOp(p, sp, vt, _OP_map_key_i16) + case reflect.Int32: + self.compileMapOp(p, sp, vt, _OP_map_key_i32) + case reflect.Int64: + self.compileMapOp(p, sp, vt, _OP_map_key_i64) + case reflect.Uint: + self.compileMapOp(p, sp, vt, _OP_map_key_uint()) + case reflect.Uint8: + self.compileMapOp(p, sp, vt, _OP_map_key_u8) + case reflect.Uint16: + self.compileMapOp(p, sp, vt, _OP_map_key_u16) + case reflect.Uint32: + self.compileMapOp(p, sp, vt, _OP_map_key_u32) + case reflect.Uint64: + self.compileMapOp(p, sp, vt, _OP_map_key_u64) + case reflect.Uintptr: + self.compileMapOp(p, sp, vt, _OP_map_key_uintptr()) + case reflect.Float32: + self.compileMapOp(p, sp, vt, _OP_map_key_f32) + case reflect.Float64: + self.compileMapOp(p, sp, vt, _OP_map_key_f64) + case reflect.String: + self.compileMapOp(p, sp, vt, _OP_map_key_str) + default: + panic(&json.UnmarshalTypeError{Type: vt}) + } } func (self *_Compiler) compileMapOp(p *_Program, sp int, vt reflect.Type, op _Op) { - i := p.pc() - p.add(_OP_is_null) - p.tag(sp + 1) - skip := self.checkIfSkip(p, vt, '{') - p.add(_OP_save) - p.add(_OP_map_init) - p.add(_OP_save) - p.add(_OP_lspace) - j := p.pc() - p.chr(_OP_check_char, '}') - p.chr(_OP_match_char, '"') - skip2 := p.pc() - p.rtt(op, vt) - - /* match the value separator */ - p.add(_OP_lspace) - p.chr(_OP_match_char, ':') - self.compileOne(p, sp + 2, vt.Elem()) - p.pin(skip2) - p.add(_OP_load) - k0 := p.pc() - p.add(_OP_lspace) - k1 := p.pc() - p.chr(_OP_check_char, '}') - p.chr(_OP_match_char, ',') - p.add(_OP_lspace) - p.chr(_OP_match_char, '"') - skip3 := p.pc() - p.rtt(op, vt) - - /* match the value separator */ - p.add(_OP_lspace) - p.chr(_OP_match_char, ':') - self.compileOne(p, sp + 2, vt.Elem()) - p.pin(skip3) - p.add(_OP_load) - p.int(_OP_goto, k0) - p.pin(j) - p.pin(k1) - p.add(_OP_drop_2) - x := p.pc() - p.add(_OP_goto) - p.pin(i) - p.add(_OP_nil_1) - p.pin(skip) - p.pin(x) + i := p.pc() + p.add(_OP_is_null) + p.tag(sp + 1) + skip := self.checkIfSkip(p, vt, '{') + p.add(_OP_save) + p.add(_OP_map_init) + p.add(_OP_save) + p.add(_OP_lspace) + j := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, '"') + skip2 := p.pc() + p.rtt(op, vt) + + /* match the value separator */ + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + self.compileOne(p, sp+2, vt.Elem()) + p.pin(skip2) + p.add(_OP_load) + k0 := p.pc() + p.add(_OP_lspace) + k1 := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, ',') + p.add(_OP_lspace) + p.chr(_OP_match_char, '"') + skip3 := p.pc() + p.rtt(op, vt) + + /* match the value separator */ + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + self.compileOne(p, sp+2, vt.Elem()) + p.pin(skip3) + p.add(_OP_load) + p.int(_OP_goto, k0) + p.pin(j) + p.pin(k1) + p.add(_OP_drop_2) + x := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + p.pin(skip) + p.pin(x) } func (self *_Compiler) compilePtr(p *_Program, sp int, et reflect.Type) { - i := p.pc() - p.add(_OP_is_null) - - /* dereference all the way down */ - for et.Kind() == reflect.Ptr { - if et.Implements(jsonUnmarshalerType) { - p.rtt(_OP_unmarshal_p, et) - return - } - - if et.Implements(encodingTextUnmarshalerType) { - p.add(_OP_lspace) - self.compileUnmarshalTextPtr(p, et) - return - } - - et = et.Elem() - p.rtt(_OP_deref, et) - } - - /* check for recursive nesting */ - ok := self.tab[et] - if ok { - p.rtt(_OP_recurse, et) - } else { - /* enter the recursion */ - p.add(_OP_lspace) - self.tab[et] = true - - /* not inline the pointer type - * recursing the 
defined pointer type's elem will casue issue379. - */ - self.compileOps(p, sp, et) - } - delete(self.tab, et) - - j := p.pc() - p.add(_OP_goto) - p.pin(i) - p.add(_OP_nil_1) - p.pin(j) + i := p.pc() + p.add(_OP_is_null) + + /* dereference all the way down */ + for et.Kind() == reflect.Ptr { + if et.Implements(jsonUnmarshalerType) { + p.rtt(_OP_unmarshal_p, et) + return + } + + if et.Implements(encodingTextUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalTextPtr(p, et) + return + } + + et = et.Elem() + p.rtt(_OP_deref, et) + } + + /* check for recursive nesting */ + ok := self.tab[et] + if ok { + p.rtt(_OP_recurse, et) + } else { + /* enter the recursion */ + p.add(_OP_lspace) + self.tab[et] = true + + /* not inline the pointer type + * recursing the defined pointer type's elem will casue issue379. + */ + self.compileOps(p, sp, et) + } + delete(self.tab, et) + + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + p.pin(j) } func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type) { - x := p.pc() - p.add(_OP_is_null) - p.tag(sp) - skip := self.checkIfSkip(p, vt, '[') - - p.add(_OP_save) - p.add(_OP_lspace) - v := []int{p.pc()} - p.chr(_OP_check_char, ']') - - /* decode every item */ - for i := 1; i <= vt.Len(); i++ { - self.compileOne(p, sp + 1, vt.Elem()) - p.add(_OP_load) - p.int(_OP_index, i * int(vt.Elem().Size())) - p.add(_OP_lspace) - v = append(v, p.pc()) - p.chr(_OP_check_char, ']') - p.chr(_OP_match_char, ',') - } - - /* drop rest of the array */ - p.add(_OP_array_skip) - w := p.pc() - p.add(_OP_goto) - p.rel(v) - - /* check for pointer data */ - if rt.UnpackType(vt.Elem()).PtrData == 0 { - p.int(_OP_array_clear, int(vt.Size())) - } else { - p.int(_OP_array_clear_p, int(vt.Size())) - } - - /* restore the stack */ - p.pin(w) - p.add(_OP_drop) - - p.pin(skip) - p.pin(x) + x := p.pc() + p.add(_OP_is_null) + p.tag(sp) + skip := self.checkIfSkip(p, vt, '[') + + p.add(_OP_save) + p.add(_OP_lspace) + v := []int{p.pc()} + p.chr(_OP_check_char, ']') + + /* decode every item */ + for i := 1; i <= vt.Len(); i++ { + self.compileOne(p, sp+1, vt.Elem()) + p.add(_OP_load) + p.int(_OP_index, i*int(vt.Elem().Size())) + p.add(_OP_lspace) + v = append(v, p.pc()) + p.chr(_OP_check_char, ']') + p.chr(_OP_match_char, ',') + } + + /* drop rest of the array */ + p.add(_OP_array_skip) + w := p.pc() + p.add(_OP_goto) + p.rel(v) + + /* check for pointer data */ + if rt.UnpackType(vt.Elem()).PtrData == 0 { + p.int(_OP_array_clear, int(vt.Size())) + } else { + p.int(_OP_array_clear_p, int(vt.Size())) + } + + /* restore the stack */ + p.pin(w) + p.add(_OP_drop) + + p.pin(skip) + p.pin(x) } func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) { - if vt.Elem().Kind() == byteType.Kind() { - self.compileSliceBin(p, sp, vt) - } else { - self.compileSliceList(p, sp, vt) - } + if vt.Elem().Kind() == byteType.Kind() { + self.compileSliceBin(p, sp, vt) + } else { + self.compileSliceList(p, sp, vt) + } } func (self *_Compiler) compileSliceBin(p *_Program, sp int, vt reflect.Type) { - i := p.pc() - p.add(_OP_is_null) - j := p.pc() - p.chr(_OP_check_char, '[') - skip := self.checkIfSkip(p, vt, '"') - k := p.pc() - p.chr(_OP_check_char, '"') - p.add(_OP_bin) - x := p.pc() - p.add(_OP_goto) - p.pin(j) - self.compileSliceBody(p, sp, vt.Elem()) - y := p.pc() - p.add(_OP_goto) - p.pin(i) - p.pin(k) - p.add(_OP_nil_3) - p.pin(x) - p.pin(skip) - p.pin(y) + i := p.pc() + p.add(_OP_is_null) + j := p.pc() + p.chr(_OP_check_char, '[') + skip := self.checkIfSkip(p, vt, '"') + 
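// [editor's note] checkIfSkip (defined at the end of this file) emits a
// check_char_0 test: when the next character matches it is consumed and
// decoding continues; otherwise a dismatch error is recorded and _OP_go_skip
// skips the whole value. Here it lets a []byte field accept either a base64
// string (the '"' path) or a plain JSON array (the '[' branch above).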
k := p.pc() + p.chr(_OP_check_char, '"') + p.add(_OP_bin) + x := p.pc() + p.add(_OP_goto) + p.pin(j) + self.compileSliceBody(p, sp, vt.Elem()) + y := p.pc() + p.add(_OP_goto) + p.pin(i) + p.pin(k) + p.add(_OP_nil_3) + p.pin(x) + p.pin(skip) + p.pin(y) } func (self *_Compiler) compileSliceList(p *_Program, sp int, vt reflect.Type) { - i := p.pc() - p.add(_OP_is_null) - p.tag(sp) - skip := self.checkIfSkip(p, vt, '[') - self.compileSliceBody(p, sp, vt.Elem()) - x := p.pc() - p.add(_OP_goto) - p.pin(i) - p.add(_OP_nil_3) - p.pin(x) - p.pin(skip) + i := p.pc() + p.add(_OP_is_null) + p.tag(sp) + skip := self.checkIfSkip(p, vt, '[') + self.compileSliceBody(p, sp, vt.Elem()) + x := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_3) + p.pin(x) + p.pin(skip) } func (self *_Compiler) compileSliceBody(p *_Program, sp int, et reflect.Type) { - p.add(_OP_lspace) - j := p.pc() - p.chr(_OP_check_empty, ']') - p.rtt(_OP_slice_init, et) - p.add(_OP_save) - p.rtt(_OP_slice_append, et) - self.compileOne(p, sp + 1, et) - p.add(_OP_load) - k0 := p.pc() - p.add(_OP_lspace) - k1 := p.pc() - p.chr(_OP_check_char, ']') - p.chr(_OP_match_char, ',') - p.rtt(_OP_slice_append, et) - self.compileOne(p, sp + 1, et) - p.add(_OP_load) - p.int(_OP_goto, k0) - p.pin(k1) - p.add(_OP_drop) - p.pin(j) + p.add(_OP_lspace) + j := p.pc() + p.chr(_OP_check_empty, ']') + p.rtt(_OP_slice_init, et) + p.add(_OP_save) + p.rtt(_OP_slice_append, et) + self.compileOne(p, sp+1, et) + p.add(_OP_load) + k0 := p.pc() + p.add(_OP_lspace) + k1 := p.pc() + p.chr(_OP_check_char, ']') + p.chr(_OP_match_char, ',') + p.rtt(_OP_slice_append, et) + self.compileOne(p, sp+1, et) + p.add(_OP_load) + p.int(_OP_goto, k0) + p.pin(k1) + p.add(_OP_drop) + p.pin(j) } func (self *_Compiler) compileString(p *_Program, vt reflect.Type) { - if vt == jsonNumberType { - self.compilePrimitive(vt, p, _OP_num) - } else { - self.compileStringBody(vt, p) - } + if vt == jsonNumberType { + self.compilePrimitive(vt, p, _OP_num) + } else { + self.compileStringBody(vt, p) + } } func (self *_Compiler) compileStringBody(vt reflect.Type, p *_Program) { - i := p.pc() - p.add(_OP_is_null) - skip := self.checkIfSkip(p, vt, '"') - p.add(_OP_str) - p.pin(i) - p.pin(skip) + i := p.pc() + p.add(_OP_is_null) + skip := self.checkIfSkip(p, vt, '"') + p.add(_OP_str) + p.pin(i) + p.pin(skip) } func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) { - if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { - p.rtt(_OP_recurse, vt) - if self.opts.RecursiveDepth > 0 { - self.rec[vt] = true - } - } else { - self.compileStructBody(p, sp, vt) - } + if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { + p.rtt(_OP_recurse, vt) + if self.opts.RecursiveDepth > 0 { + self.rec[vt] = true + } + } else { + self.compileStructBody(p, sp, vt) + } } func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) { - fv := resolver.ResolveStruct(vt) - fm, sw := caching.CreateFieldMap(len(fv)), make([]int, len(fv)) - - /* start of object */ - p.tag(sp) - n := p.pc() - p.add(_OP_is_null) - - skip := self.checkIfSkip(p, vt, '{') - - p.add(_OP_save) - p.add(_OP_lspace) - x := p.pc() - p.chr(_OP_check_char, '}') - p.chr(_OP_match_char, '"') - p.fmv(_OP_struct_field, fm) - p.add(_OP_lspace) - p.chr(_OP_match_char, ':') - p.tab(_OP_switch, sw) - p.add(_OP_object_next) - y0 := p.pc() - p.add(_OP_lspace) - y1 := p.pc() - p.chr(_OP_check_char, '}') - p.chr(_OP_match_char, ',') - - 
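// [editor's note] The program emitted here amounts to a two-phase loop over the
// object's keys: _OP_struct_field hashes the key through the field map, and
// _OP_switch jumps to the matching field's decoder (unknown keys fall through
// to _OP_object_next, which skips the value). A rough sketch of the control
// flow, with hypothetical helper names:
//
//	for {
//	    idx := fieldMap.Get(key)        // _OP_struct_field
//	    if idx >= 0 {
//	        decodeField(idx)            // _OP_switch into sw[idx]
//	    } else {
//	        skipValue()                 // _OP_object_next
//	    }
//	    if nextChar() == '}' {          // _OP_check_char
//	        break
//	    }                               // otherwise _OP_match_char ','
//	}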
/* special case of an empty struct */ - if len(fv) == 0 { - p.add(_OP_object_skip) - goto end_of_object - } - - /* match the remaining fields */ - p.add(_OP_lspace) - p.chr(_OP_match_char, '"') - p.fmv(_OP_struct_field, fm) - p.add(_OP_lspace) - p.chr(_OP_match_char, ':') - p.tab(_OP_switch, sw) - p.add(_OP_object_next) - p.int(_OP_goto, y0) - - /* process each field */ - for i, f := range fv { - sw[i] = p.pc() - fm.Set(f.Name, i) - - /* index to the field */ - for _, o := range f.Path { - if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { - p.rtt(_OP_deref, o.Type) - } - } - - /* check for "stringnize" option */ - if (f.Opts & resolver.F_stringize) == 0 { - self.compileOne(p, sp + 1, f.Type) - } else { - self.compileStructFieldStr(p, sp + 1, f.Type) - } - - /* load the state, and try next field */ - p.add(_OP_load) - p.int(_OP_goto, y0) - } + fv := resolver.ResolveStruct(vt) + fm, sw := caching.CreateFieldMap(len(fv)), make([]int, len(fv)) + + /* start of object */ + p.tag(sp) + n := p.pc() + p.add(_OP_is_null) + + skip := self.checkIfSkip(p, vt, '{') + + p.add(_OP_save) + p.add(_OP_lspace) + x := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, '"') + p.fmv(_OP_struct_field, fm) + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + p.tab(_OP_switch, sw) + p.add(_OP_object_next) + y0 := p.pc() + p.add(_OP_lspace) + y1 := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, ',') + + /* special case of an empty struct */ + if len(fv) == 0 { + p.add(_OP_object_skip) + goto end_of_object + } + + /* match the remaining fields */ + p.add(_OP_lspace) + p.chr(_OP_match_char, '"') + p.fmv(_OP_struct_field, fm) + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + p.tab(_OP_switch, sw) + p.add(_OP_object_next) + p.int(_OP_goto, y0) + + /* process each field */ + for i, f := range fv { + sw[i] = p.pc() + fm.Set(f.Name, i) + + /* index to the field */ + for _, o := range f.Path { + if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { + p.rtt(_OP_deref, o.Type) + } + } + + /* check for "stringnize" option */ + if (f.Opts & resolver.F_stringize) == 0 { + self.compileOne(p, sp+1, f.Type) + } else { + self.compileStructFieldStr(p, sp+1, f.Type) + } + + /* load the state, and try next field */ + p.add(_OP_load) + p.int(_OP_goto, y0) + } end_of_object: - p.pin(x) - p.pin(y1) - p.add(_OP_drop) - p.pin(n) - p.pin(skip) + p.pin(x) + p.pin(y1) + p.add(_OP_drop) + p.pin(n) + p.pin(skip) } func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) { - n1 := -1 - ft := vt - sv := false - - /* dereference the pointer if needed */ - if ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - - /* check if it can be stringized */ - switch ft.Kind() { - case reflect.Bool : sv = true - case reflect.Int : sv = true - case reflect.Int8 : sv = true - case reflect.Int16 : sv = true - case reflect.Int32 : sv = true - case reflect.Int64 : sv = true - case reflect.Uint : sv = true - case reflect.Uint8 : sv = true - case reflect.Uint16 : sv = true - case reflect.Uint32 : sv = true - case reflect.Uint64 : sv = true - case reflect.Uintptr : sv = true - case reflect.Float32 : sv = true - case reflect.Float64 : sv = true - case reflect.String : sv = true - } - - /* if it's not, ignore the "string" and follow the regular path */ - if !sv { - self.compileOne(p, sp, vt) - return - } - - /* remove the leading space, and match the leading quote */ - vk := vt.Kind() - p.add(_OP_lspace) - n0 := p.pc() - p.add(_OP_is_null) - - skip := self.checkIfSkip(p, stringType, '"') - - /* also 
check for inner "null" */ - n1 = p.pc() - p.add(_OP_is_null_quote) - - /* dereference the pointer only when it is not null */ - if vk == reflect.Ptr { - vt = vt.Elem() - p.rtt(_OP_deref, vt) - } - - n2 := p.pc() - p.chr(_OP_check_char_0, '"') - - /* string opcode selector */ - _OP_string := func() _Op { - if ft == jsonNumberType { - return _OP_num - } else { - return _OP_unquote - } - } - - /* compile for each type */ - switch vt.Kind() { - case reflect.Bool : p.add(_OP_bool) - case reflect.Int : p.add(_OP_int()) - case reflect.Int8 : p.add(_OP_i8) - case reflect.Int16 : p.add(_OP_i16) - case reflect.Int32 : p.add(_OP_i32) - case reflect.Int64 : p.add(_OP_i64) - case reflect.Uint : p.add(_OP_uint()) - case reflect.Uint8 : p.add(_OP_u8) - case reflect.Uint16 : p.add(_OP_u16) - case reflect.Uint32 : p.add(_OP_u32) - case reflect.Uint64 : p.add(_OP_u64) - case reflect.Uintptr : p.add(_OP_uintptr()) - case reflect.Float32 : p.add(_OP_f32) - case reflect.Float64 : p.add(_OP_f64) - case reflect.String : p.add(_OP_string()) - default : panic("not reachable") - } - - /* the closing quote is not needed when parsing a pure string */ - if vt == jsonNumberType || vt.Kind() != reflect.String { - p.chr(_OP_match_char, '"') - } - - /* pin the `is_null_quote` jump location */ - if n1 != -1 && vk != reflect.Ptr { - p.pin(n1) - } - - /* "null" but not a pointer, act as if the field is not present */ - if vk != reflect.Ptr { - pc2 := p.pc() - p.add(_OP_goto) - p.pin(n2) - p.rtt(_OP_dismatch_err, vt) - p.int(_OP_add, 1) - p.pin(pc2) - p.pin(n0) - return - } - - /* the "null" case of the pointer */ - pc := p.pc() - p.add(_OP_goto) - p.pin(n0) // `is_null` jump location - p.pin(n1) // `is_null_quote` jump location - p.add(_OP_nil_1) - pc2 := p.pc() - p.add(_OP_goto) - p.pin(n2) - p.rtt(_OP_dismatch_err, vt) - p.int(_OP_add, 1) - p.pin(pc) - p.pin(pc2) - p.pin(skip) + n1 := -1 + ft := vt + sv := false + + /* dereference the pointer if needed */ + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + /* check if it can be stringized */ + switch ft.Kind() { + case reflect.Bool: + sv = true + case reflect.Int: + sv = true + case reflect.Int8: + sv = true + case reflect.Int16: + sv = true + case reflect.Int32: + sv = true + case reflect.Int64: + sv = true + case reflect.Uint: + sv = true + case reflect.Uint8: + sv = true + case reflect.Uint16: + sv = true + case reflect.Uint32: + sv = true + case reflect.Uint64: + sv = true + case reflect.Uintptr: + sv = true + case reflect.Float32: + sv = true + case reflect.Float64: + sv = true + case reflect.String: + sv = true + } + + /* if it's not, ignore the "string" and follow the regular path */ + if !sv { + self.compileOne(p, sp, vt) + return + } + + /* remove the leading space, and match the leading quote */ + vk := vt.Kind() + p.add(_OP_lspace) + n0 := p.pc() + p.add(_OP_is_null) + + skip := self.checkIfSkip(p, stringType, '"') + + /* also check for inner "null" */ + n1 = p.pc() + p.add(_OP_is_null_quote) + + /* dereference the pointer only when it is not null */ + if vk == reflect.Ptr { + vt = vt.Elem() + p.rtt(_OP_deref, vt) + } + + n2 := p.pc() + p.chr(_OP_check_char_0, '"') + + /* string opcode selector */ + _OP_string := func() _Op { + if ft == jsonNumberType { + return _OP_num + } else { + return _OP_unquote + } + } + + /* compile for each type */ + switch vt.Kind() { + case reflect.Bool: + p.add(_OP_bool) + case reflect.Int: + p.add(_OP_int()) + case reflect.Int8: + p.add(_OP_i8) + case reflect.Int16: + p.add(_OP_i16) + case reflect.Int32: + p.add(_OP_i32) + case 
reflect.Int64: + p.add(_OP_i64) + case reflect.Uint: + p.add(_OP_uint()) + case reflect.Uint8: + p.add(_OP_u8) + case reflect.Uint16: + p.add(_OP_u16) + case reflect.Uint32: + p.add(_OP_u32) + case reflect.Uint64: + p.add(_OP_u64) + case reflect.Uintptr: + p.add(_OP_uintptr()) + case reflect.Float32: + p.add(_OP_f32) + case reflect.Float64: + p.add(_OP_f64) + case reflect.String: + p.add(_OP_string()) + default: + panic("not reachable") + } + + /* the closing quote is not needed when parsing a pure string */ + if vt == jsonNumberType || vt.Kind() != reflect.String { + p.chr(_OP_match_char, '"') + } + + /* pin the `is_null_quote` jump location */ + if n1 != -1 && vk != reflect.Ptr { + p.pin(n1) + } + + /* "null" but not a pointer, act as if the field is not present */ + if vk != reflect.Ptr { + pc2 := p.pc() + p.add(_OP_goto) + p.pin(n2) + p.rtt(_OP_dismatch_err, vt) + p.int(_OP_add, 1) + p.pin(pc2) + p.pin(n0) + return + } + + /* the "null" case of the pointer */ + pc := p.pc() + p.add(_OP_goto) + p.pin(n0) // `is_null` jump location + p.pin(n1) // `is_null_quote` jump location + p.add(_OP_nil_1) + pc2 := p.pc() + p.add(_OP_goto) + p.pin(n2) + p.rtt(_OP_dismatch_err, vt) + p.int(_OP_add, 1) + p.pin(pc) + p.pin(pc2) + p.pin(skip) } func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) { - i := p.pc() - p.add(_OP_is_null) + i := p.pc() + p.add(_OP_is_null) - /* check for empty interface */ - if vt.NumMethod() == 0 { - p.add(_OP_any) - } else { - p.rtt(_OP_dyn, vt) - } + /* check for empty interface */ + if vt.NumMethod() == 0 { + p.add(_OP_any) + } else { + p.rtt(_OP_dyn, vt) + } - /* finish the OpCode */ - j := p.pc() - p.add(_OP_goto) - p.pin(i) - p.add(_OP_nil_2) - p.pin(j) + /* finish the OpCode */ + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_2) + p.pin(j) } func (self *_Compiler) compilePrimitive(vt reflect.Type, p *_Program, op _Op) { - i := p.pc() - p.add(_OP_is_null) - // skip := self.checkPrimitive(p, vt) - p.add(op) - p.pin(i) - // p.pin(skip) + i := p.pc() + p.add(_OP_is_null) + // skip := self.checkPrimitive(p, vt) + p.add(op) + p.pin(i) + // p.pin(skip) } func (self *_Compiler) compileUnmarshalEnd(p *_Program, vt reflect.Type, i int) { - j := p.pc() - k := vt.Kind() + j := p.pc() + k := vt.Kind() - /* not a pointer */ - if k != reflect.Ptr { - p.pin(i) - return - } + /* not a pointer */ + if k != reflect.Ptr { + p.pin(i) + return + } - /* it seems that in Go JSON library, "null" takes priority over any kind of unmarshaler */ - p.add(_OP_goto) - p.pin(i) - p.add(_OP_nil_1) - p.pin(j) + /* it seems that in Go JSON library, "null" takes priority over any kind of unmarshaler */ + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + p.pin(j) } func (self *_Compiler) compileUnmarshalJson(p *_Program, vt reflect.Type) { - i := p.pc() - v := _OP_unmarshal - p.add(_OP_is_null) + i := p.pc() + v := _OP_unmarshal + p.add(_OP_is_null) - /* check for dynamic interface */ - if vt.Kind() == reflect.Interface { - v = _OP_dyn - } + /* check for dynamic interface */ + if vt.Kind() == reflect.Interface { + v = _OP_dyn + } - /* call the unmarshaler */ - p.rtt(v, vt) - self.compileUnmarshalEnd(p, vt, i) + /* call the unmarshaler */ + p.rtt(v, vt) + self.compileUnmarshalEnd(p, vt, i) } func (self *_Compiler) compileUnmarshalText(p *_Program, vt reflect.Type) { - i := p.pc() - v := _OP_unmarshal_text - p.add(_OP_is_null) + i := p.pc() + v := _OP_unmarshal_text + p.add(_OP_is_null) - /* check for dynamic interface */ - if vt.Kind() == reflect.Interface { - v = _OP_dyn - } else 
{ - p.chr(_OP_match_char, '"') - } + /* check for dynamic interface */ + if vt.Kind() == reflect.Interface { + v = _OP_dyn + } else { + p.chr(_OP_match_char, '"') + } - /* call the unmarshaler */ - p.rtt(v, vt) - self.compileUnmarshalEnd(p, vt, i) + /* call the unmarshaler */ + p.rtt(v, vt) + self.compileUnmarshalEnd(p, vt, i) } func (self *_Compiler) compileUnmarshalTextPtr(p *_Program, vt reflect.Type) { - i := p.pc() - p.add(_OP_is_null) - p.chr(_OP_match_char, '"') - p.rtt(_OP_unmarshal_text_p, vt) - p.pin(i) + i := p.pc() + p.add(_OP_is_null) + p.chr(_OP_match_char, '"') + p.rtt(_OP_unmarshal_text_p, vt) + p.pin(i) } func (self *_Compiler) checkIfSkip(p *_Program, vt reflect.Type, c byte) int { - j := p.pc() - p.chr(_OP_check_char_0, c) - p.rtt(_OP_dismatch_err, vt) - s := p.pc() - p.add(_OP_go_skip) - p.pin(j) - p.int(_OP_add, 1) - return s -} \ No newline at end of file + j := p.pc() + p.chr(_OP_check_char_0, c) + p.rtt(_OP_dismatch_err, vt) + s := p.pc() + p.add(_OP_go_skip) + p.pin(j) + p.int(_OP_add, 1) + return s +} diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/debug.go b/vendor/github.com/bytedance/sonic/internal/decoder/debug.go index 9cf3a6a00..da67132d4 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/debug.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/debug.go @@ -17,54 +17,53 @@ package decoder import ( - `os` - `runtime` - `runtime/debug` - `strings` + "os" + "runtime" + "runtime/debug" + "strings" - `github.com/bytedance/sonic/internal/jit` + "github.com/bytedance/sonic/internal/jit" ) - var ( - debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" - debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" ) var ( - _Instr_End _Instr = newInsOp(_OP_nil_1) + _Instr_End _Instr = newInsOp(_OP_nil_1) - _F_gc = jit.Func(runtime.GC) - _F_force_gc = jit.Func(debug.FreeOSMemory) - _F_println = jit.Func(println_wrapper) - _F_print = jit.Func(print) + _F_gc = jit.Func(runtime.GC) + _F_force_gc = jit.Func(debug.FreeOSMemory) + _F_println = jit.Func(println_wrapper) + _F_print = jit.Func(print) ) -func println_wrapper(i int, op1 int, op2 int){ - println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +func println_wrapper(i int, op1 int, op2 int) { + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) } -func print(i int){ - println(i) +func print(i int) { + println(i) } func (self *_Assembler) force_gc() { - self.call_go(_F_gc) - self.call_go(_F_force_gc) + self.call_go(_F_gc) + self.call_go(_F_force_gc) } func (self *_Assembler) debug_instr(i int, v *_Instr) { - if debugSyncGC { - if (i+1 == len(self.p)) { - self.print_gc(i, v, &_Instr_End) - } else { - next := &(self.p[i+1]) - self.print_gc(i, v, next) - name := _OpNames[next.op()] - if strings.Contains(name, "save") { - return - } - } - self.force_gc() - } -} \ No newline at end of file + if debugSyncGC { + if i+1 == len(self.p) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + self.force_gc() + } +} diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/decoder.go b/vendor/github.com/bytedance/sonic/internal/decoder/decoder.go index 19ad71965..b7875776c 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/decoder.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/decoder.go @@ -17,175 +17,173 
@@ package decoder import ( - `unsafe` - `encoding/json` - `reflect` - `runtime` - - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` - `github.com/bytedance/sonic/option` - `github.com/bytedance/sonic/utf8` + "encoding/json" + "reflect" + "runtime" + "unsafe" + + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" + "github.com/bytedance/sonic/utf8" ) const ( - _F_use_int64 = iota - _F_use_number - _F_disable_urc - _F_disable_unknown - _F_copy_string - _F_validate_string - - _F_allow_control = 31 + _F_use_int64 = iota + _F_use_number + _F_disable_urc + _F_disable_unknown + _F_copy_string + _F_validate_string + + _F_allow_control = 31 ) type Options uint64 const ( - OptionUseInt64 Options = 1 << _F_use_int64 - OptionUseNumber Options = 1 << _F_use_number - OptionUseUnicodeErrors Options = 1 << _F_disable_urc - OptionDisableUnknown Options = 1 << _F_disable_unknown - OptionCopyString Options = 1 << _F_copy_string - OptionValidateString Options = 1 << _F_validate_string + OptionUseInt64 Options = 1 << _F_use_int64 + OptionUseNumber Options = 1 << _F_use_number + OptionUseUnicodeErrors Options = 1 << _F_disable_urc + OptionDisableUnknown Options = 1 << _F_disable_unknown + OptionCopyString Options = 1 << _F_copy_string + OptionValidateString Options = 1 << _F_validate_string ) func (self *Decoder) SetOptions(opts Options) { - if (opts & OptionUseNumber != 0) && (opts & OptionUseInt64 != 0) { - panic("can't set OptionUseInt64 and OptionUseNumber both!") - } - self.f = uint64(opts) + if (opts&OptionUseNumber != 0) && (opts&OptionUseInt64 != 0) { + panic("can't set OptionUseInt64 and OptionUseNumber both!") + } + self.f = uint64(opts) } - // Decoder is the decoder context object type Decoder struct { - i int - f uint64 - s string + i int + f uint64 + s string } // NewDecoder creates a new decoder instance. func NewDecoder(s string) *Decoder { - return &Decoder{s: s} + return &Decoder{s: s} } // Pos returns the current decoding position. func (self *Decoder) Pos() int { - return self.i + return self.i } func (self *Decoder) Reset(s string) { - self.s = s - self.i = 0 - // self.f = 0 + self.s = s + self.i = 0 + // self.f = 0 } func (self *Decoder) CheckTrailings() error { - pos := self.i - buf := self.s - /* skip all the trailing spaces */ - if pos != len(buf) { - for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 { - pos++ - } - } - - /* then it must be at EOF */ - if pos == len(buf) { - return nil - } - - /* junk after JSON value */ - return SyntaxError { - Src : buf, - Pos : pos, - Code : types.ERR_INVALID_CHAR, - } + pos := self.i + buf := self.s + /* skip all the trailing spaces */ + if pos != len(buf) { + for pos < len(buf) && (types.SPACE_MASK&(1<<buf[pos])) != 0 { + pos++ + } + } + + /* then it must be at EOF */ + if pos == len(buf) { + return nil + } + + /* junk after JSON value */ + return SyntaxError{ + Src: buf, + Pos: pos, + Code: types.ERR_INVALID_CHAR, + } } - // Decode parses the JSON-encoded data from current position and stores the result // in the value pointed to by val. 
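// [editor's note] A minimal usage sketch, assuming this internal package is
// reached as `decoder` (applications normally go through sonic's public API;
// the literal input below is illustrative):
//
//	d := decoder.NewDecoder(`{"name":"skywire","port":8080}`)
//	var v map[string]interface{}
//	if err := d.Decode(&v); err != nil {
//	    // handle malformed JSON or an invalid destination
//	}
//	if err := d.CheckTrailings(); err != nil {
//	    // non-space bytes remain after the first JSON value
//	}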
func (self *Decoder) Decode(val interface{}) error { - /* validate json if needed */ - if (self.f & (1 << _F_validate_string)) != 0 && !utf8.ValidateString(self.s){ - dbuf := utf8.CorrectWith(nil, rt.Str2Mem(self.s), "\ufffd") - self.s = rt.Mem2Str(dbuf) - } - - vv := rt.UnpackEface(val) - vp := vv.Value - - /* check for nil type */ - if vv.Type == nil { - return &json.InvalidUnmarshalError{} - } - - /* must be a non-nil pointer */ - if vp == nil || vv.Type.Kind() != reflect.Ptr { - return &json.InvalidUnmarshalError{Type: vv.Type.Pack()} - } - - etp := rt.PtrElem(vv.Type) - - /* check the defined pointer type for issue 379 */ - if vv.Type.IsNamed() { - newp := vp - etp = vv.Type - vp = unsafe.Pointer(&newp) - } - - /* create a new stack, and call the decoder */ - sb := newStack() - nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f) - /* return the stack back */ - self.i = nb - freeStack(sb) - - /* avoid GC ahead */ - runtime.KeepAlive(vv) - return err + /* validate json if needed */ + if (self.f&(1<<_F_validate_string)) != 0 && !utf8.ValidateString(self.s) { + dbuf := utf8.CorrectWith(nil, rt.Str2Mem(self.s), "\ufffd") + self.s = rt.Mem2Str(dbuf) + } + + vv := rt.UnpackEface(val) + vp := vv.Value + + /* check for nil type */ + if vv.Type == nil { + return &json.InvalidUnmarshalError{} + } + + /* must be a non-nil pointer */ + if vp == nil || vv.Type.Kind() != reflect.Ptr { + return &json.InvalidUnmarshalError{Type: vv.Type.Pack()} + } + + etp := rt.PtrElem(vv.Type) + + /* check the defined pointer type for issue 379 */ + if vv.Type.IsNamed() { + newp := vp + etp = vv.Type + vp = unsafe.Pointer(&newp) + } + + /* create a new stack, and call the decoder */ + sb := newStack() + nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f) + /* return the stack back */ + self.i = nb + freeStack(sb) + + /* avoid GC ahead */ + runtime.KeepAlive(vv) + return err } // UseInt64 indicates the Decoder to unmarshal an integer into an interface{} as an // int64 instead of as a float64. func (self *Decoder) UseInt64() { - self.f |= 1 << _F_use_int64 - self.f &^= 1 << _F_use_number + self.f |= 1 << _F_use_int64 + self.f &^= 1 << _F_use_number } // UseNumber indicates the Decoder to unmarshal a number into an interface{} as a // json.Number instead of as a float64. func (self *Decoder) UseNumber() { - self.f &^= 1 << _F_use_int64 - self.f |= 1 << _F_use_number + self.f &^= 1 << _F_use_int64 + self.f |= 1 << _F_use_number } // UseUnicodeErrors indicates the Decoder to return an error when encounter invalid // UTF-8 escape sequences. func (self *Decoder) UseUnicodeErrors() { - self.f |= 1 << _F_disable_urc + self.f |= 1 << _F_disable_urc } // DisallowUnknownFields indicates the Decoder to return an error when the destination // is a struct and the input contains object keys which do not match any // non-ignored, exported fields in the destination. func (self *Decoder) DisallowUnknownFields() { - self.f |= 1 << _F_disable_unknown + self.f |= 1 << _F_disable_unknown } // CopyString indicates the Decoder to decode string values by copying instead of referring. func (self *Decoder) CopyString() { - self.f |= 1 << _F_copy_string + self.f |= 1 << _F_copy_string } -// ValidateString causes the Decoder to validate string values when decoding string value +// ValidateString causes the Decoder to validate string values when decoding string value // in JSON. Validation is that, returning error when unescaped control chars(0x00-0x1f) or // invalid UTF-8 chars in the string value of JSON. 
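// [editor's note] The Options constants above are single-bit flags, so they
// combine with |; note that SetOptions panics if OptionUseInt64 and
// OptionUseNumber are both set, as enforced earlier in this file. A hedged
// usage sketch:
//
//	d := decoder.NewDecoder(s)
//	d.SetOptions(decoder.OptionCopyString | decoder.OptionValidateString)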
func (self *Decoder) ValidateString() { - self.f |= 1 << _F_validate_string + self.f |= 1 << _F_validate_string } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in @@ -194,62 +192,62 @@ func (self *Decoder) ValidateString() { // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is // a compile option to set the depth of recursive compile for the nested struct type. func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { - cfg := option.DefaultCompileOptions() - for _, opt := range opts { - opt(&cfg) - } - return pretouchRec(map[reflect.Type]bool{vt:true}, cfg) + cfg := option.DefaultCompileOptions() + for _, opt := range opts { + opt(&cfg) + } + return pretouchRec(map[reflect.Type]bool{vt: true}, cfg) } func pretouchType(_vt reflect.Type, opts option.CompileOptions) (map[reflect.Type]bool, error) { - /* compile function */ - compiler := newCompiler().apply(opts) - decoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) { - if pp, err := compiler.compile(_vt); err != nil { - return nil, err - } else { - as := newAssembler(pp) - as.name = _vt.String() - return as.Load(), nil - } - } - - /* find or compile */ - vt := rt.UnpackType(_vt) - if val := programCache.Get(vt); val != nil { - return nil, nil - } else if _, err := programCache.Compute(vt, decoder); err == nil { - return compiler.rec, nil - } else { - return nil, err - } + /* compile function */ + compiler := newCompiler().apply(opts) + decoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) { + if pp, err := compiler.compile(_vt); err != nil { + return nil, err + } else { + as := newAssembler(pp) + as.name = _vt.String() + return as.Load(), nil + } + } + + /* find or compile */ + vt := rt.UnpackType(_vt) + if val := programCache.Get(vt); val != nil { + return nil, nil + } else if _, err := programCache.Compute(vt, decoder); err == nil { + return compiler.rec, nil + } else { + return nil, err + } } func pretouchRec(vtm map[reflect.Type]bool, opts option.CompileOptions) error { - if opts.RecursiveDepth < 0 || len(vtm) == 0 { - return nil - } - next := make(map[reflect.Type]bool) - for vt := range(vtm) { - sub, err := pretouchType(vt, opts) - if err != nil { - return err - } - for svt := range(sub) { - next[svt] = true - } - } - opts.RecursiveDepth -= 1 - return pretouchRec(next, opts) + if opts.RecursiveDepth < 0 || len(vtm) == 0 { + return nil + } + next := make(map[reflect.Type]bool) + for vt := range vtm { + sub, err := pretouchType(vt, opts) + if err != nil { + return err + } + for svt := range sub { + next[svt] = true + } + } + opts.RecursiveDepth -= 1 + return pretouchRec(next, opts) } // Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid. // Otherwise, returns negative error code using start and invalid character position using end func Skip(data []byte) (start int, end int) { - s := rt.Mem2Str(data) - p := 0 - m := types.NewStateMachine() - ret := native.SkipOne(&s, &p, m, uint64(0)) - types.FreeStateMachine(m) - return ret, p -} \ No newline at end of file + s := rt.Mem2Str(data) + p := 0 + m := types.NewStateMachine() + ret := native.SkipOne(&s, &p, m, uint64(0)) + types.FreeStateMachine(m) + return ret, p +}
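Reviewer note on the API these hunks reformat: the decoder.go change above is indentation-only, but it documents the decoding surface in passing. Below is a minimal usage sketch; it assumes the public github.com/bytedance/sonic/decoder and github.com/bytedance/sonic/option packages re-export these names (NewDecoder, SetOptions, the Option* flags, Pretouch, CheckTrailings) unchanged from the vendored internal package.

    package main

    import (
        "fmt"
        "reflect"

        "github.com/bytedance/sonic/decoder" // assumed public re-export of the internal package above
        "github.com/bytedance/sonic/option"
    )

    type Payload struct {
        ID   int64
        Tags []string
    }

    func main() {
        // Pretouch compiles Payload ahead of time so the first Decode does not pay the JIT cost.
        if err := decoder.Pretouch(reflect.TypeOf(Payload{}), option.WithCompileRecursiveDepth(2)); err != nil {
            panic(err)
        }

        d := decoder.NewDecoder(`{"ID": 1, "Tags": ["a"]}  `)
        // OptionUseInt64 and OptionUseNumber are mutually exclusive; SetOptions panics if both are set.
        d.SetOptions(decoder.OptionUseInt64 | decoder.OptionCopyString)

        var p Payload
        if err := d.Decode(&p); err != nil {
            panic(err)
        }
        // CheckTrailings tolerates trailing spaces only; any other junk is a SyntaxError.
        if err := d.CheckTrailings(); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", p)
    }

Pretouch walks nested types level by level (pretouchRec decrements RecursiveDepth once per level), so a small depth is usually enough for flat payloads.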
diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/errors.go b/vendor/github.com/bytedance/sonic/internal/decoder/errors.go index c905fdfb0..793e894d5 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/errors.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/errors.go @@ -17,165 +17,171 @@ package decoder import ( - `encoding/json` - `errors` - `fmt` - `reflect` - `strconv` - `strings` - - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) type SyntaxError struct { - Pos int - Src string - Code types.ParsingError - Msg string + Pos int + Src string + Code types.ParsingError + Msg string } func (self SyntaxError) Error() string { - return fmt.Sprintf("%q", self.Description()) + return fmt.Sprintf("%q", self.Description()) } func (self SyntaxError) Description() string { - return "Syntax error " + self.description() + return "Syntax error " + self.description() } func (self SyntaxError) description() string { - i := 16 - p := self.Pos - i - q := self.Pos + i - - /* check for empty source */ - if self.Src == "" { - return fmt.Sprintf("no sources available: %#v", self) - } - - /* prevent slicing before the beginning */ - if p < 0 { - p, q, i = 0, q - p, i + p - } - - /* prevent slicing beyond the end */ - if n := len(self.Src); q > n { - n = q - n - q = len(self.Src) - - /* move the left bound if possible */ - if p > n { - i += n - p -= n - } - } - - /* left and right length */ - x := clamp_zero(i) - y := clamp_zero(q - p - i - 1) - - /* compose the error description */ - return fmt.Sprintf( - "at index %d: %s\n\n\t%s\n\t%s^%s\n", - self.Pos, - self.Message(), - self.Src[p:q], - strings.Repeat(".", x), - strings.Repeat(".", y), - ) + i := 16 + p := self.Pos - i + q := self.Pos + i + + /* check for empty source */ + if self.Src == "" { + return fmt.Sprintf("no sources available: %#v", self) + } + + /* prevent slicing before the beginning */ + if p < 0 { + p, q, i = 0, q-p, i+p + } + + /* prevent slicing beyond the end */ + if n := len(self.Src); q > n { + n = q - n + q = len(self.Src) + + /* move the left bound if possible */ + if p > n { + i += n + p -= n + } + } + + /* left and right length */ + x := clamp_zero(i) + y := clamp_zero(q - p - i - 1) + + /* compose the error description */ + return fmt.Sprintf( + "at index %d: %s\n\n\t%s\n\t%s^%s\n", + self.Pos, + self.Message(), + self.Src[p:q], + strings.Repeat(".", x), + strings.Repeat(".", y), + ) } func (self SyntaxError) Message() string { - if self.Msg == "" { - return self.Code.Message() - } - return self.Msg + if self.Msg == "" { + return self.Code.Message() + } + return self.Msg } func clamp_zero(v int) int { - if v < 0 { - return 0 - } else { - return v - } + if v < 0 { + return 0 + } else { + return v + } } /** JIT Error Helpers **/ -var stackOverflow = &json.UnsupportedValueError { - Str : "Value nesting too
deep", - Value : reflect.ValueOf("..."), +var stackOverflow = &json.UnsupportedValueError{ + Str: "Value nesting too deep", + Value: reflect.ValueOf("..."), } //go:nosplit func error_wrap(src string, pos int, code types.ParsingError) error { - return SyntaxError { - Pos : pos, - Src : src, - Code : code, - } + return SyntaxError{ + Pos: pos, + Src: src, + Code: code, + } } //go:nosplit func error_type(vt *rt.GoType) error { - return &json.UnmarshalTypeError{Type: vt.Pack()} + return &json.UnmarshalTypeError{Type: vt.Pack()} } type MismatchTypeError struct { - Pos int - Src string - Type reflect.Type -} - -func swithchJSONType (src string, pos int) string { - var val string - switch src[pos] { - case 'f': fallthrough - case 't': val = "bool" - case '"': val = "string" - case '{': val = "object" - case '[': val = "array" - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': val = "number" - } - return val + Pos int + Src string + Type reflect.Type +} + +func swithchJSONType(src string, pos int) string { + var val string + switch src[pos] { + case 'f': + fallthrough + case 't': + val = "bool" + case '"': + val = "string" + case '{': + val = "object" + case '[': + val = "array" + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val = "number" + } + return val } func (self MismatchTypeError) Error() string { - se := SyntaxError { - Pos : self.Pos, - Src : self.Src, - Code : types.ERR_MISMATCH, - } - return fmt.Sprintf("Mismatch type %s with value %s %q", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description()) + se := SyntaxError{ + Pos: self.Pos, + Src: self.Src, + Code: types.ERR_MISMATCH, + } + return fmt.Sprintf("Mismatch type %s with value %s %q", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description()) } func (self MismatchTypeError) Description() string { - se := SyntaxError { - Pos : self.Pos, - Src : self.Src, - Code : types.ERR_MISMATCH, - } - return fmt.Sprintf("Mismatch type %s with value %s %s", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description()) + se := SyntaxError{ + Pos: self.Pos, + Src: self.Src, + Code: types.ERR_MISMATCH, + } + return fmt.Sprintf("Mismatch type %s with value %s %s", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description()) } //go:nosplit func error_mismatch(src string, pos int, vt *rt.GoType) error { - return &MismatchTypeError { - Pos : pos, - Src : src, - Type : vt.Pack(), - } + return &MismatchTypeError{ + Pos: pos, + Src: src, + Type: vt.Pack(), + } } //go:nosplit func error_field(name string) error { - return errors.New("json: unknown field " + strconv.Quote(name)) + return errors.New("json: unknown field " + strconv.Quote(name)) } //go:nosplit func error_value(value string, vtype reflect.Type) error { - return &json.UnmarshalTypeError { - Type : vtype, - Value : value, - } + return &json.UnmarshalTypeError{ + Type: vtype, + Value: value, + } } diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go116.go b/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go116.go index b597043f9..358b13522 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go116.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go116.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.17 // +build go1.15,!go1.17 /* @@ -19,16 +20,16 @@ package decoder import ( - `encoding/json` - `fmt` - `reflect` - `strconv` - - `github.com/bytedance/sonic/internal/jit` - `github.com/bytedance/sonic/internal/native` - 
`github.com/bytedance/sonic/internal/native/types` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/obj/x86` + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" ) /** Crucial Registers: @@ -43,734 +44,734 @@ import ( */ const ( - _VD_args = 8 // 8 bytes for passing arguments to this functions - _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions - _VD_saves = 40 // 40 bytes for saving the registers before CALL instructions - _VD_locals = 88 // 88 bytes for local variables + _VD_args = 8 // 8 bytes for passing arguments to this functions + _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions + _VD_saves = 40 // 40 bytes for saving the registers before CALL instructions + _VD_locals = 88 // 88 bytes for local variables ) const ( - _VD_offs = _VD_fargs + _VD_saves + _VD_locals - _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer + _VD_offs = _VD_fargs + _VD_saves + _VD_locals + _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer ) var ( - _VAR_ss = _VAR_ss_Vt - _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves) + _VAR_ss = _VAR_ss_Vt + _VAR_df = jit.Ptr(_SP, _VD_fargs+_VD_saves) ) var ( - _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8) - _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16) - _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24) - _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32) - _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40) - _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48) + _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs+_VD_saves+8) + _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs+_VD_saves+16) + _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs+_VD_saves+24) + _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs+_VD_saves+32) + _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs+_VD_saves+40) + _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs+_VD_saves+48) ) var ( - _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56) - _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64) - _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72) - _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80) + _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs+_VD_saves+56) + _VAR_cs_p = jit.Ptr(_SP, _VD_fargs+_VD_saves+64) + _VAR_cs_n = jit.Ptr(_SP, _VD_fargs+_VD_saves+72) + _VAR_cs_d = jit.Ptr(_SP, _VD_fargs+_VD_saves+80) ) type _ValueDecoder struct { - jit.BaseAssembler + jit.BaseAssembler } func (self *_ValueDecoder) build() uintptr { - self.Init(self.compile) - return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) + self.Init(self.compile) + return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) } /** Function Calling Helpers **/ func (self *_ValueDecoder) save(r ...obj.Addr) { - for i, v := range r { - if i > _VD_saves / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8)) - } - } + for i, v := range r { + if i > _VD_saves/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs+int64(i)*8)) + } + } } func (self *_ValueDecoder) load(r ...obj.Addr) { - for i, v := range r { - if i > _VD_saves / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v) - } - } + for i, v := 
range r { + if i > _VD_saves/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs+int64(i)*8), v) + } + } } func (self *_ValueDecoder) call(fn obj.Addr) { - self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX - self.Rjmp("CALL", _AX) // CALL AX + self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX + self.Rjmp("CALL", _AX) // CALL AX } func (self *_ValueDecoder) call_go(fn obj.Addr) { - self.save(_REG_go...) // SAVE $REG_go - self.call(fn) // CALL ${fn} - self.load(_REG_go...) // LOAD $REG_go + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) // CALL ${fn} + self.load(_REG_go...) // LOAD $REG_go } /** Decoder Assembler **/ const ( - _S_val = iota + 1 - _S_arr - _S_arr_0 - _S_obj - _S_obj_0 - _S_obj_delim - _S_obj_sep + _S_val = iota + 1 + _S_arr + _S_arr_0 + _S_obj + _S_obj_0 + _S_obj_delim + _S_obj_sep ) const ( - _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) - _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) - _S_vmask = (1 << _S_val) | (1 << _S_arr_0) + _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) + _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) + _S_vmask = (1 << _S_val) | (1 << _S_arr_0) ) const ( - _A_init_len = 1 - _A_init_cap = 16 + _A_init_len = 1 + _A_init_cap = 16 ) const ( - _ST_Sp = 0 - _ST_Vt = _PtrBytes - _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) + _ST_Sp = 0 + _ST_Vt = _PtrBytes + _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) ) var ( - _V_true = jit.Imm(int64(pbool(true))) - _V_false = jit.Imm(int64(pbool(false))) - _F_value = jit.Imm(int64(native.S_value)) + _V_true = jit.Imm(int64(pbool(true))) + _V_false = jit.Imm(int64(pbool(false))) + _F_value = jit.Imm(int64(native.S_value)) ) var ( - _V_max = jit.Imm(int64(types.V_MAX)) - _E_eof = jit.Imm(int64(types.ERR_EOF)) - _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) - _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) + _V_max = jit.Imm(int64(types.V_MAX)) + _E_eof = jit.Imm(int64(types.ERR_EOF)) + _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) + _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) ) var ( - _F_convTslice = jit.Func(convTslice) - _F_convTstring = jit.Func(convTstring) - _F_invalid_vtype = jit.Func(invalid_vtype) + _F_convTslice = jit.Func(convTslice) + _F_convTstring = jit.Func(convTstring) + _F_invalid_vtype = jit.Func(invalid_vtype) ) var ( - _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) - _T_bool = jit.Type(reflect.TypeOf(false)) - _T_int64 = jit.Type(reflect.TypeOf(int64(0))) - _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) - _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) - _T_string = jit.Type(reflect.TypeOf("")) - _T_number = jit.Type(reflect.TypeOf(json.Number(""))) - _T_float64 = jit.Type(reflect.TypeOf(float64(0))) + _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) + _T_bool = jit.Type(reflect.TypeOf(false)) + _T_int64 = jit.Type(reflect.TypeOf(int64(0))) + _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) + _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) + _T_string = jit.Type(reflect.TypeOf("")) + _T_number = jit.Type(reflect.TypeOf(json.Number(""))) + _T_float64 = jit.Type(reflect.TypeOf(float64(0))) ) -var _R_tab = map[int]string { - '[': "_decode_V_ARRAY", - '{': "_decode_V_OBJECT", - ':': "_decode_V_KEY_SEP", - ',': "_decode_V_ELEM_SEP", - ']': "_decode_V_ARRAY_END", - '}': "_decode_V_OBJECT_END", +var _R_tab = map[int]string{ + '[': "_decode_V_ARRAY", + '{': "_decode_V_OBJECT", + ':': "_decode_V_KEY_SEP", + ',': "_decode_V_ELEM_SEP", + ']': 
"_decode_V_ARRAY_END", + '}': "_decode_V_OBJECT_END", } func (self *_ValueDecoder) compile() { - self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP - self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) - self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP - - /* initialize the state machine */ - self.Emit("XORL", _CX, _CX) // XORL CX, CX - self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df - /* initialize digital buffer first */ - self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap - self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX - self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf - /* add ST offset */ - self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] - self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] - self.Sjmp("JMP" , "_next") // JMP _next - - /* set the value from previous round */ - self.Link("_set_value") // _set_value: - self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error - self.Emit("XORL" , _SI, _SI) // XORL SI, SI - self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI - self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) - self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) - - /* check for value stack */ - self.Link("_next") // _next: - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_return") // JS _return - - /* fast path: test up to 4 characters manually */ - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , "_decode_fast") // JA _decode_fast - self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX - self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - - /* at least 1 to 3 spaces */ - for i := 0; i < 3; i++ { - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , "_decode_fast") // JA _decode_fast - self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX - self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - } - - /* at least 4 spaces */ - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - - /* fast path: use lookup table to select decoder */ - self.Link("_decode_fast") // _decode_fast: - self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI - self.Sref("_decode_tab", 4) // .... 
&_decode_tab - self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX - self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_decode_native") // JZ _decode_native - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX - self.Rjmp("JMP" , _AX) // JMP AX - - /* decode with native decoder */ - self.Link("_decode_native") // _decode_native: - self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI - self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI - self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX - self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX - self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8 - self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8 - self.call(_F_value) // CALL value - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - - /* check for errors */ - self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_parsing_error") - self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype - self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max - self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype - - /* jump table selector */ - self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI - self.Sref("_switch_table", 4) // .... &_switch_table - self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX - self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX - self.Rjmp("JMP" , _AX) // JMP AX - - /** V_EOF **/ - self.Link("_decode_V_EOF") // _decode_V_EOF: - self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP - self.Sjmp("JMP" , "_error") // JMP _error - - /** V_NULL **/ - self.Link("_decode_V_NULL") // _decode_V_NULL: - self.Emit("XORL", _R8, _R8) // XORL R8, R8 - self.Emit("XORL", _R9, _R9) // XORL R9, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_TRUE **/ - self.Link("_decode_V_TRUE") // _decode_V_TRUE: - self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 - // TODO: maybe modified by users? 
- self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_FALSE **/ - self.Link("_decode_V_FALSE") // _decode_V_FALSE: - self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 - self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_ARRAY **/ - self.Link("_decode_V_ARRAY") // _decode_V_ARRAY - self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char - - /* create a new array */ - self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) - self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) - self.call_go(_F_makeslice) // CALL_GO runtime.makeslice - self.Emit("MOVQ", jit.Ptr(_SP, 24), _DX) // MOVQ 24(SP), DX - - /* pack into an interface */ - self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) - self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) - self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) - self.call_go(_F_convTslice) // CALL_GO runtime.convTslice - self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) // MOVQ 24(SP), R8 - - /* replace current state with an array */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] - self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) - self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) - - /* add a new slot for the first element */ - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow - self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] - self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_OBJECT **/ - self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: - self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char - self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small - self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj, ST.Vt[CX] - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX - self.Emit("MOVQ", _DX, 
jit.Ptr(_SI, 0)) // MOVQ DX, (SI) - self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_STRING **/ - self.Link("_decode_V_STRING") // _decode_V_STRING: - self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX - self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX - self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX - - /* check for escapes */ - self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 - self.Sjmp("JNE" , "_unquote") // JNE _unquote - self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX - self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 - self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI - self.Sref("_copy_string_end", 4) - self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) - self.Sjmp("JC", "copy_string") - self.Link("_copy_string_end") - self.Emit("XORL", _DX, _DX) // XORL DX, DX - /* strings with no escape sequences */ - self.Link("_noescape") // _noescape: - self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI - self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI - self.Sjmp("JC" , "_object_key") // JC _object_key - - /* check for pre-packed strings, avoid 1 allocation */ - self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX - self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str - self.Emit("MOVQ" , _R8, jit.Ptr(_SP, 0)) // MOVQ R8, (SP) - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - self.call_go(_F_convTstring) // CALL_GO runtime.convTstring - self.Emit("MOVQ" , jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 - - /* packed string already in R9 */ - self.Link("_packed_str") // _packed_str: - self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 - self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI - self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* the string is an object key, get the map */ - self.Link("_object_key") - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - - /* add a new delimiter */ - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] - - /* add a new slot int the map */ - self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX - self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) - self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP) - self.Emit("MOVQ", _R8, jit.Ptr(_SP, 16)) // MOVQ R9, 16(SP) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) - self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr - self.Emit("MOVQ", jit.Ptr(_SP, 32), _AX) // MOVQ 32(SP), AX - - /* add to the pointer stack */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* allocate memory to store the string header and unquoted result */ - self.Link("_unquote") // _unquote: - self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX - self.Emit("MOVQ", _T_byte, _CX) // MOVQ _T_byte, CX - 
self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.Emit("MOVB", jit.Imm(0), jit.Ptr(_SP, 16)) // MOVB $0, 16(SP) - self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc - self.Emit("MOVQ", jit.Ptr(_SP, 24), _R9) // MOVQ 24(SP), R9 - - /* prepare the unquoting parameters */ - self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX - self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI - self.Emit("NEGQ" , _CX) // NEGQ CX - self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI - self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX - self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX - self.Emit("XORL" , _R8, _R8) // XORL R8, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv - self.Emit("SETCC", _R8) // SETCC R8 - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 - - /* unquote the string, with R9 been preserved */ - self.save(_R9) // SAVE R9 - self.call(_F_unquote) // CALL unquote - self.load(_R9) // LOAD R9 - - /* check for errors */ - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_unquote_error") // JS _unquote_error - self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX - self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8 - self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) - self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) - self.Sjmp("JMP" , "_noescape") // JMP _noescape - - /** V_DOUBLE **/ - self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: - self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df - self.Sjmp("JC" , "_use_number") // JC _use_number - self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 - self.Sjmp("JMP" , "_use_float64") // JMP _use_float64 - - /** V_INTEGER **/ - self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: - self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df - self.Sjmp("JC" , "_use_number") // JC _use_number - self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df - self.Sjmp("JC" , "_use_int64") // JC _use_int64 - self.Emit("MOVQ" , _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX - self.Emit("CVTSQ2SD", _AX, _X0) // CVTSQ2SD AX, X0 - - /* represent numbers as `float64` */ - self.Link("_use_float64") // _use_float64: - self.Emit("MOVSD", _X0, jit.Ptr(_SP, 0)) // MOVSD X0, (SP) - self.call_go(_F_convT64) // CALL_GO runtime.convT64 - self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8 - self.Emit("MOVQ" , jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 - self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* represent numbers as `json.Number` */ - self.Link("_use_number") // _use_number - self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX - self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI - self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX - self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX - self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP) - self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.call_go(_F_convTstring) // CALL_GO runtime.convTstring - self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 - self.Emit("MOVQ", jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 - self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* represent numbers as `int64` */ - self.Link("_use_int64") // _use_int64: - self.Emit("MOVQ", _VAR_ss_Iv, _AX) 
// MOVQ ss.Iv, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.call_go(_F_convT64) // CALL_GO runtime.convT64 - self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 - self.Emit("MOVQ", jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 - self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_KEY_SEP **/ - self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: - // self.Byte(0xcc) - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] - self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1] - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_ELEM_SEP **/ - self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) // CMPQ _AX, _S_arr - self.Sjmp("JE" , "_array_sep") // JZ _next - self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) - self.Sjmp("JMP" , "_next") // JMP _next - - /* arrays */ - self.Link("_array_sep") - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX - self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) - self.Sjmp("JAE" , "_array_more") // JAE _array_more - - /* add a slot for the new element */ - self.Link("_array_append") // _array_append: - self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) - self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") - self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX - self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] - self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_ARRAY_END **/ - self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: - self.Emit("XORL", _DX, _DX) // XORL DX, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 - self.Sjmp("JE" , "_first_item") // JE _first_item - self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* first element of an array */ - self.Link("_first_item") // _first_item: - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("SUBQ", 
jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1] - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] - self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_OBJECT_END **/ - self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: - self.Emit("MOVL", jit.Imm(_S_omask_end), _DX) // MOVL _S_omask, DI - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) - self.Sjmp("JNC" , "_invalid_char") // JNE _invalid_char - self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* return from decoder */ - self.Link("_return") // _return: - self.Emit("XORL", _EP, _EP) // XORL EP, EP - self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] - self.Link("_epilogue") // _epilogue: - self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST - self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP - self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP - self.Emit("RET") // RET - - /* array expand */ - self.Link("_array_more") // _array_more: - self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX - self.Emit("MOVOU", jit.Ptr(_SI, 0), _X0) // MOVOU (SI), X0 - self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DX) // MOVQ 16(SI), DX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) - self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 24)) // MOVQ DX, 24(SP) - self.Emit("SHLQ" , jit.Imm(1), _DX) // SHLQ $1, DX - self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 32)) // MOVQ DX, 32(SP) - self.call_go(_F_growslice) // CALL_GO runtime.growslice - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVOU 40(SP), DI - self.Emit("MOVQ" , jit.Ptr(_SP, 48), _DX) // MOVOU 48(SP), DX - self.Emit("MOVQ" , jit.Ptr(_SP, 56), _AX) // MOVQ 56(SP), AX - - /* update the slice */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) - self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX) - self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI) - self.Sjmp("JMP" , "_array_append") // JMP _array_append - - /* copy string */ - self.Link("copy_string") // pointer: R8, length: AX, return addr: DI - // self.Byte(0xcc) - self.Emit("MOVQ", _R8, _VAR_cs_p) - self.Emit("MOVQ", _AX, _VAR_cs_n) - self.Emit("MOVQ", _DI, _VAR_cs_LR) - self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0)) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) - self.call_go(_F_makeslice) - self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) - self.Emit("MOVQ", _R8, _VAR_cs_d) - self.Emit("MOVQ", _R8, jit.Ptr(_SP, 0)) - self.Emit("MOVQ", _VAR_cs_p, _R8) - self.Emit("MOVQ", _R8, jit.Ptr(_SP, 8)) - self.Emit("MOVQ", _VAR_cs_n, _AX) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) - self.call_go(_F_memmove) - self.Emit("MOVQ", _VAR_cs_d, _R8) - 
self.Emit("MOVQ", _VAR_cs_n, _AX) - self.Emit("MOVQ", _VAR_cs_LR, _DI) - // self.Byte(0xcc) - self.Rjmp("JMP", _DI) - - /* error handlers */ - self.Link("_stack_overflow") - self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_vtype_error") // _vtype_error: - self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC - self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_invalid_char") // _invalid_char: - self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC - self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_unquote_error") // _unquote_error: - self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC - self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC - self.Link("_parsing_error") // _parsing_error: - self.Emit("NEGQ" , _AX) // NEGQ AX - self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP - self.Link("_error") // _error: - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) - self.Sjmp("JMP" , "_epilogue") // JMP _epilogue - - /* invalid value type, never returns */ - self.Link("_invalid_vtype") - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.call(_F_invalid_vtype) // CALL invalid_type - self.Emit("UD2") // UD2 - - /* switch jump table */ - self.Link("_switch_table") // _switch_table: - self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 - self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 - self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 - self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 - self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 - self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 - self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 - self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 - self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 - self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 - self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 - self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 - self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 - - /* fast character lookup table */ - self.Link("_decode_tab") // _decode_tab: - self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 - - /* generate rest of the tabs */ - for i := 1; i < 256; i++ { - if to, ok := _R_tab[i]; ok { - self.Sref(to, -int64(i) * 4) - } else { - self.Byte(0x00, 0x00, 0x00, 0x00) - } - } + self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP + + /* initialize the state machine */ + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df + /* initialize digital buffer first */ + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf + /* add ST offset */ + self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] + self.Emit("MOVQ", jit.Imm(_S_val), 
jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] + self.Sjmp("JMP", "_next") // JMP _next + + /* set the value from previous round */ + self.Link("_set_value") // _set_value: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_vtype_error") // JNC _vtype_error + self.Emit("XORL", _SI, _SI) // XORL SI, SI + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI + self.Emit("MOVQ", _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) + self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) + + /* check for value stack */ + self.Link("_next") // _next: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_return") // JS _return + + /* fast path: test up to 4 characters manually */ + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("MOVQ", jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", "_decode_fast") // JA _decode_fast + self.Emit("BTQ", _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC", "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + + /* at least 1 to 3 spaces */ + for i := 0; i < 3; i++ { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", "_decode_fast") // JA _decode_fast + self.Emit("BTQ", _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC", "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + } + + /* at least 4 spaces */ + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + + /* fast path: use lookup table to select decoder */ + self.Link("_decode_fast") // _decode_fast: + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_decode_tab", 4) // .... 
&_decode_tab + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_decode_native") // JZ _decode_native + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP", _AX) // JMP AX + + /* decode with native decoder */ + self.Link("_decode_native") // _decode_native: + self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX + self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX + self.Emit("MOVQ", _VAR_df, _R8) // MOVQ df, R8 + self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // BTSQ _F_allow_control, R8 + self.call(_F_value) // CALL value + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + + /* check for errors */ + self.Emit("MOVQ", _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_parsing_error") + self.Sjmp("JZ", "_invalid_vtype") // JZ _invalid_vtype + self.Emit("CMPQ", _AX, _V_max) // CMPQ AX, _V_max + self.Sjmp("JA", "_invalid_vtype") // JA _invalid_vtype + + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table", 4) // .... &_switch_table + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX + self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP", _AX) // JMP AX + + /** V_EOF **/ + self.Link("_decode_V_EOF") // _decode_V_EOF: + self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP + self.Sjmp("JMP", "_error") // JMP _error + + /** V_NULL **/ + self.Link("_decode_V_NULL") // _decode_V_NULL: + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("XORL", _R9, _R9) // XORL R9, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value
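Reviewer note: _R_tab plus the generated _decode_tab form a 256-entry jump table keyed on the first non-space byte; structural characters jump straight to their handlers, and a zero entry falls through to the native value parser (_decode_native above). A rough pure-Go analogue, with a hypothetical handler type for illustration:

    package main

    // handlers mirrors _decode_tab: a nil entry means "no fast path",
    // i.e. fall through to the native parser (native.S_value in the JIT).
    var handlers [256]func()

    func init() {
        handlers['['] = func() { /* _decode_V_ARRAY */ }
        handlers['{'] = func() { /* _decode_V_OBJECT */ }
        handlers[':'] = func() { /* _decode_V_KEY_SEP */ }
        handlers[','] = func() { /* _decode_V_ELEM_SEP */ }
        handlers[']'] = func() { /* _decode_V_ARRAY_END */ }
        handlers['}'] = func() { /* _decode_V_OBJECT_END */ }
    }

    func dispatch(c byte) {
        if h := handlers[c]; h != nil {
            h() // fast path: indexed JMP through the table
            return
        }
        // slow path: hand the input to the native decoder
    }

    func main() { dispatch('{') }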
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_FALSE **/ + self.Link("_decode_V_FALSE") // _decode_V_FALSE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_ARRAY **/ + self.Link("_decode_V_ARRAY") // _decode_V_ARRAY + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_invalid_char") // JNC _invalid_char + + /* create a new array */ + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) + self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) + self.call_go(_F_makeslice) // CALL_GO runtime.makeslice + self.Emit("MOVQ", jit.Ptr(_SP, 24), _DX) // MOVQ 24(SP), DX + + /* pack into an interface */ + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) + self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) + self.call_go(_F_convTslice) // CALL_GO runtime.convTslice + self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) // MOVQ 24(SP), R8 + + /* replace current state with an array */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] + self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) + self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) + + /* add a new slot for the first element */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /** V_OBJECT **/ + self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_invalid_char") // JNC _invalid_char + self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small + self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj, ST.Vt[CX] + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) 
+ + /** V_OBJECT **/ + self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_invalid_char") // JNC _invalid_char + self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small + self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_0, ST.Vt[CX] + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI) + self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) + self.Sjmp("JMP", "_next") // JMP _next + + /** V_STRING **/ + self.Link("_decode_V_STRING") // _decode_V_STRING: + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX + self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX + + /* check for escapes */ + self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 + self.Sjmp("JNE", "_unquote") // JNE _unquote + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 + self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI + self.Sref("_copy_string_end", 4) + self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) + self.Sjmp("JC", "copy_string") + self.Link("_copy_string_end") + self.Emit("XORL", _DX, _DX) // XORL DX, DX + /* strings with no escape sequences */ + self.Link("_noescape") // _noescape: + self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask_key, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI + self.Emit("BTQ", _SI, _DI) // BTQ SI, DI + self.Sjmp("JC", "_object_key") // JC _object_key + + /* check for pre-packed strings, avoid 1 allocation */ + self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX + self.Sjmp("JNZ", "_packed_str") // JNZ _packed_str + self.Emit("MOVQ", _R8, jit.Ptr(_SP, 0)) // MOVQ R8, (SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ", jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 + + /* packed string already in R9 */ + self.Link("_packed_str") // _packed_str: + self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 + self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI + self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /* the string is an object key, get the map */ + self.Link("_object_key") + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + + /* add a new delimiter */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") // JAE _stack_overflow + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] + + /* add a new slot in the map */ + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP) + self.Emit("MOVQ", _R8, jit.Ptr(_SP, 16)) // MOVQ R8, 16(SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) + self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr + self.Emit("MOVQ", jit.Ptr(_SP, 32), _AX) // MOVQ 32(SP), AX + + /* add to the pointer stack */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next
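Reviewer note: the LEAQ (IP)(CX), R8 fast path above hands back unescaped string values as slices of the input buffer, and the BTQ _F_copy_string test diverts to the copy_string stub (emitted near the end of this function) when OptionCopyString is set. The trade-off, in plain Go:

    package main

    func main() {
        input := []byte(`{"k":"value"}`)

        // Default (referencing): the decoded value aliases input's backing
        // array, so the whole buffer stays reachable for as long as ref is.
        ref := input[6 : len(input)-2]

        // OptionCopyString (the copy_string stub): one small allocation,
        // after which input can be garbage-collected independently.
        cp := make([]byte, len(ref))
        copy(cp, ref)

        _, _ = ref, cp
    }

Aliasing saves an allocation per string; copying releases large transient inputs sooner.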
+ + /* allocate memory to store the string header and unquoted result */ + self.Link("_unquote") // _unquote: + self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX + self.Emit("MOVQ", _T_byte, _CX) // MOVQ _T_byte, CX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVB", jit.Imm(0), jit.Ptr(_SP, 16)) // MOVB $0, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), _R9) // MOVQ 24(SP), R9 + + /* prepare the unquoting parameters */ + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI + self.Emit("NEGQ", _CX) // NEGQ CX + self.Emit("LEAQ", jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI + self.Emit("LEAQ", jit.Ptr(_R9, 16), _DX) // LEAQ 16(R9), DX + self.Emit("LEAQ", _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, df + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + + /* unquote the string, with R9 preserved */ + self.save(_R9) // SAVE R9 + self.call(_F_unquote) // CALL unquote + self.load(_R9) // LOAD R9 + + /* check for errors */ + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_unquote_error") // JS _unquote_error + self.Emit("MOVL", jit.Imm(1), _DX) // MOVL $1, DX + self.Emit("LEAQ", jit.Ptr(_R9, 16), _R8) // LEAQ 16(R9), R8 + self.Emit("MOVQ", _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) + self.Emit("MOVQ", _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) + self.Sjmp("JMP", "_noescape") // JMP _noescape
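Reviewer note: the _F_disable_urc bit fed into the unquote call above (shifted to types.B_UNICODE_REPLACE) selects between replacing invalid UTF-8 with U+FFFD, the default, and rejecting it, which is what UseUnicodeErrors enables in decoder.go. A pure-Go sketch of the same policy, assuming the replacement behaviour matches utf8.CorrectWith(..., "\ufffd") used by Decode:

    package main

    import (
        "errors"
        "fmt"
        "strings"
        "unicode/utf8"
    )

    func sanitize(s string, strict bool) (string, error) {
        if utf8.ValidString(s) {
            return s, nil
        }
        if strict { // UseUnicodeErrors: B_UNICODE_REPLACE cleared
            return "", errors.New("json: invalid UTF-8 in string")
        }
        return strings.ToValidUTF8(s, "\uFFFD"), nil // default: replace
    }

    func main() {
        fmt.Println(sanitize("a\xffb", false)) // a<U+FFFD>b <nil>
        fmt.Println(sanitize("a\xffb", true))  // "" plus an error
    }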
MOVQ AX, (SP) + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 + self.Emit("MOVQ", jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_KEY_SEP **/ + self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: + // self.Byte(0xcc) + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] + self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt-8)) // MOVQ _S_obj, ST.Vt[CX - 1] + self.Sjmp("JMP", "_next") // JMP _next + + /** V_ELEM_SEP **/ + self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ _AX, _S_arr + self.Sjmp("JE", "_array_sep") // JZ _next + self.Emit("CMPQ", _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) + self.Sjmp("JMP", "_next") // JMP _next + + /* arrays */ + self.Link("_array_sep") + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX + self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) + self.Sjmp("JAE", "_array_more") // JAE _array_more + + /* add a slot for the new element */ + self.Link("_array_append") // _array_append: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) + self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") + self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX + self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(7, _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} + self.Sjmp("JMP", "_next") // JMP _next + + /** V_ARRAY_END **/ + self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 + self.Sjmp("JE", "_first_item") // JE _first_item + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /* first element of an array */ + self.Link("_first_item") // _first_item: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp + self.Emit("MOVQ", jit.Sib(_ST, 
_CX, 8, _ST_Vp-8), _SI) // MOVQ ST.Vp[CX - 1], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp-8)) // MOVQ DX, ST.Vp[CX - 1] + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Sjmp("JMP", "_next") // JMP _next + + /** V_OBJECT_END **/ + self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: + self.Emit("MOVL", jit.Imm(_S_omask_end), _DX) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) + self.Sjmp("JNC", "_invalid_char") // JNE _invalid_char + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /* return from decoder */ + self.Link("_return") // _return: + self.Emit("XORL", _EP, _EP) // XORL EP, EP + self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] + self.Link("_epilogue") // _epilogue: + self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST + self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP + self.Emit("RET") // RET + + /* array expand */ + self.Link("_array_more") // _array_more: + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVOU", jit.Ptr(_SI, 0), _X0) // MOVOU (SI), X0 + self.Emit("MOVQ", jit.Ptr(_SI, 16), _DX) // MOVQ 16(SI), DX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 24)) // MOVQ DX, 24(SP) + self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 32)) // MOVQ DX, 32(SP) + self.call_go(_F_growslice) // CALL_GO runtime.growslice + self.Emit("MOVQ", jit.Ptr(_SP, 40), _DI) // MOVOU 40(SP), DI + self.Emit("MOVQ", jit.Ptr(_SP, 48), _DX) // MOVOU 48(SP), DX + self.Emit("MOVQ", jit.Ptr(_SP, 56), _AX) // MOVQ 56(SP), AX + + /* update the slice */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX) + self.WriteRecNotAX(8, _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI) + self.Sjmp("JMP", "_array_append") // JMP _array_append + + /* copy string */ + self.Link("copy_string") // pointer: R8, length: AX, return addr: DI + // self.Byte(0xcc) + self.Emit("MOVQ", _R8, _VAR_cs_p) + self.Emit("MOVQ", _AX, _VAR_cs_n) + self.Emit("MOVQ", _DI, _VAR_cs_LR) + self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0)) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) + self.call_go(_F_makeslice) + self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) + self.Emit("MOVQ", _R8, _VAR_cs_d) + self.Emit("MOVQ", _R8, jit.Ptr(_SP, 0)) + self.Emit("MOVQ", _VAR_cs_p, _R8) + self.Emit("MOVQ", _R8, jit.Ptr(_SP, 8)) + self.Emit("MOVQ", _VAR_cs_n, _AX) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) + self.call_go(_F_memmove) + self.Emit("MOVQ", _VAR_cs_d, _R8) + self.Emit("MOVQ", _VAR_cs_n, _AX) + self.Emit("MOVQ", _VAR_cs_LR, _DI) + // self.Byte(0xcc) + self.Rjmp("JMP", _DI) + + 
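
The `copy_string` subroutine above backs the `_F_copy_string` option: instead of returning string headers that alias the caller's JSON buffer, it allocates a fresh slice via runtime.makeslice, copies the bytes via runtime.memmove, and hands back the detached copy, so one small decoded string cannot pin the entire input document in memory. A minimal sketch of that behavior in plain Go (not sonic's actual source; the JIT builds the string header over the copy directly instead of converting):

package main

import "fmt"

// copyString mirrors the emitted routine: makeslice, memmove, then a
// string over the private copy so it no longer aliases the input.
func copyString(src []byte) string {
	dst := make([]byte, len(src)) // runtime.makeslice
	copy(dst, src)                // runtime.memmove
	return string(dst)           // one extra copy here; the JIT reuses dst as the string body
}

func main() {
	buf := []byte(`{"k":"value"}`)
	s := copyString(buf[6 : 6+5])
	buf[6] = 'X'   // mutating the input no longer affects s
	fmt.Println(s) // value
}
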
/* error handlers */ + self.Link("_stack_overflow") + self.Emit("MOVL", _E_recurse, _EP) // MOVQ _E_recurse, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_vtype_error") // _vtype_error: + self.Emit("MOVQ", _DI, _IC) // MOVQ DI, IC + self.Emit("MOVL", _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_invalid_char") // _invalid_char: + self.Emit("SUBQ", jit.Imm(1), _IC) // SUBQ $1, IC + self.Emit("MOVL", _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_unquote_error") // _unquote_error: + self.Emit("MOVQ", _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC + self.Emit("SUBQ", jit.Imm(1), _IC) // SUBQ $1, IC + self.Link("_parsing_error") // _parsing_error: + self.Emit("NEGQ", _AX) // NEGQ AX + self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP + self.Link("_error") // _error: + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Sjmp("JMP", "_epilogue") // JMP _epilogue + + /* invalid value type, never returns */ + self.Link("_invalid_vtype") + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.call(_F_invalid_vtype) // CALL invalid_type + self.Emit("UD2") // UD2 + + /* switch jump table */ + self.Link("_switch_table") // _switch_table: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 + self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 + self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 + self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 + self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 + self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 + self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 + self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 + self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 + self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 + self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 + self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 + + /* fast character lookup table */ + self.Link("_decode_tab") // _decode_tab: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + + /* generate rest of the tabs */ + for i := 1; i < 256; i++ { + if to, ok := _R_tab[i]; ok { + self.Sref(to, -int64(i)*4) + } else { + self.Byte(0x00, 0x00, 0x00, 0x00) + } + } } func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) { - self.Emit("MOVQ", _V_writeBarrier, _R10) - self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX - self.Rjmp("CALL", _R10) - if saveDI { - self.load(_DI) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", _AX, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", 
"_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) { - if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { - panic("rec contains AX!") - } - self.Emit("MOVQ", _V_writeBarrier, _R10) - self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, _AX) - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX - self.Rjmp("CALL", _R10) - if saveDI { - self.load(_DI) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Emit("MOVQ", ptr, _AX) + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } /** Generic Decoder **/ var ( - _subr_decode_value = new(_ValueDecoder).build() + _subr_decode_value = new(_ValueDecoder).build() ) //go:nosplit func invalid_vtype(vt types.ValueType) { - throw(fmt.Sprintf("invalid value type: %d", vt)) + throw(fmt.Sprintf("invalid value type: %d", vt)) } diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go117.go b/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go117.go index df1cd9f5b..788aa584f 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go117.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/generic_amd64_go117.go @@ -20,16 +20,16 @@ package decoder import ( - `encoding/json` - `fmt` - `reflect` - `strconv` - - `github.com/bytedance/sonic/internal/jit` - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/native/types` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/obj/x86` + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" ) /** Crucial Registers: @@ -44,729 +44,730 @@ import ( */ const ( - _VD_args = 8 // 8 bytes for passing arguments to this functions - _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions - _VD_saves = 48 // 48 bytes for saving the registers before CALL instructions - _VD_locals = 96 // 96 bytes for local variables + _VD_args = 8 // 8 bytes for passing arguments to this functions + _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions + _VD_saves = 48 // 48 bytes for saving the registers before CALL instructions + _VD_locals = 96 // 96 bytes for local variables ) const ( - _VD_offs = 
_VD_fargs + _VD_saves + _VD_locals - _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer + _VD_offs = _VD_fargs + _VD_saves + _VD_locals + _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer ) var ( - _VAR_ss = _VAR_ss_Vt - _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves) + _VAR_ss = _VAR_ss_Vt + _VAR_df = jit.Ptr(_SP, _VD_fargs+_VD_saves) ) var ( - _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8) - _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16) - _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24) - _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32) - _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40) - _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48) + _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs+_VD_saves+8) + _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs+_VD_saves+16) + _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs+_VD_saves+24) + _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs+_VD_saves+32) + _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs+_VD_saves+40) + _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs+_VD_saves+48) ) var ( - _VAR_R9 = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56) + _VAR_R9 = jit.Ptr(_SP, _VD_fargs+_VD_saves+56) ) + type _ValueDecoder struct { - jit.BaseAssembler + jit.BaseAssembler } var ( - _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64) - _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72) - _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80) - _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 88) + _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs+_VD_saves+64) + _VAR_cs_p = jit.Ptr(_SP, _VD_fargs+_VD_saves+72) + _VAR_cs_n = jit.Ptr(_SP, _VD_fargs+_VD_saves+80) + _VAR_cs_d = jit.Ptr(_SP, _VD_fargs+_VD_saves+88) ) func (self *_ValueDecoder) build() uintptr { - self.Init(self.compile) - return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) + self.Init(self.compile) + return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) } /** Function Calling Helpers **/ func (self *_ValueDecoder) save(r ...obj.Addr) { - for i, v := range r { - if i > _VD_saves / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8)) - } - } + for i, v := range r { + if i > _VD_saves/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs+int64(i)*8)) + } + } } func (self *_ValueDecoder) load(r ...obj.Addr) { - for i, v := range r { - if i > _VD_saves / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v) - } - } + for i, v := range r { + if i > _VD_saves/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs+int64(i)*8), v) + } + } } func (self *_ValueDecoder) call(fn obj.Addr) { - self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, AX - self.Rjmp("CALL", _R9) // CALL AX + self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R9) // CALL AX } func (self *_ValueDecoder) call_go(fn obj.Addr) { - self.save(_REG_go...) // SAVE $REG_go - self.call(fn) // CALL ${fn} - self.load(_REG_go...) // LOAD $REG_go + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) // CALL ${fn} + self.load(_REG_go...) 
// LOAD $REG_go } func (self *_ValueDecoder) callc(fn obj.Addr) { - self.Emit("XCHGQ", _IP, _BP) - self.call(fn) - self.Emit("XCHGQ", _IP, _BP) + self.Emit("XCHGQ", _IP, _BP) + self.call(fn) + self.Emit("XCHGQ", _IP, _BP) } func (self *_ValueDecoder) call_c(fn obj.Addr) { - self.Emit("XCHGQ", _IC, _BX) - self.callc(fn) - self.Emit("XCHGQ", _IC, _BX) + self.Emit("XCHGQ", _IC, _BX) + self.callc(fn) + self.Emit("XCHGQ", _IC, _BX) } /** Decoder Assembler **/ const ( - _S_val = iota + 1 - _S_arr - _S_arr_0 - _S_obj - _S_obj_0 - _S_obj_delim - _S_obj_sep + _S_val = iota + 1 + _S_arr + _S_arr_0 + _S_obj + _S_obj_0 + _S_obj_delim + _S_obj_sep ) const ( - _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) - _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) - _S_vmask = (1 << _S_val) | (1 << _S_arr_0) + _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) + _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) + _S_vmask = (1 << _S_val) | (1 << _S_arr_0) ) const ( - _A_init_len = 1 - _A_init_cap = 16 + _A_init_len = 1 + _A_init_cap = 16 ) const ( - _ST_Sp = 0 - _ST_Vt = _PtrBytes - _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) + _ST_Sp = 0 + _ST_Vt = _PtrBytes + _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) ) var ( - _V_true = jit.Imm(int64(pbool(true))) - _V_false = jit.Imm(int64(pbool(false))) - _F_value = jit.Imm(int64(native.S_value)) + _V_true = jit.Imm(int64(pbool(true))) + _V_false = jit.Imm(int64(pbool(false))) + _F_value = jit.Imm(int64(native.S_value)) ) var ( - _V_max = jit.Imm(int64(types.V_MAX)) - _E_eof = jit.Imm(int64(types.ERR_EOF)) - _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) - _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) + _V_max = jit.Imm(int64(types.V_MAX)) + _E_eof = jit.Imm(int64(types.ERR_EOF)) + _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) + _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) ) var ( - _F_convTslice = jit.Func(convTslice) - _F_convTstring = jit.Func(convTstring) - _F_invalid_vtype = jit.Func(invalid_vtype) + _F_convTslice = jit.Func(convTslice) + _F_convTstring = jit.Func(convTstring) + _F_invalid_vtype = jit.Func(invalid_vtype) ) var ( - _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) - _T_bool = jit.Type(reflect.TypeOf(false)) - _T_int64 = jit.Type(reflect.TypeOf(int64(0))) - _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) - _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) - _T_string = jit.Type(reflect.TypeOf("")) - _T_number = jit.Type(reflect.TypeOf(json.Number(""))) - _T_float64 = jit.Type(reflect.TypeOf(float64(0))) + _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) + _T_bool = jit.Type(reflect.TypeOf(false)) + _T_int64 = jit.Type(reflect.TypeOf(int64(0))) + _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) + _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) + _T_string = jit.Type(reflect.TypeOf("")) + _T_number = jit.Type(reflect.TypeOf(json.Number(""))) + _T_float64 = jit.Type(reflect.TypeOf(float64(0))) ) -var _R_tab = map[int]string { - '[': "_decode_V_ARRAY", - '{': "_decode_V_OBJECT", - ':': "_decode_V_KEY_SEP", - ',': "_decode_V_ELEM_SEP", - ']': "_decode_V_ARRAY_END", - '}': "_decode_V_OBJECT_END", +var _R_tab = map[int]string{ + '[': "_decode_V_ARRAY", + '{': "_decode_V_OBJECT", + ':': "_decode_V_KEY_SEP", + ',': "_decode_V_ELEM_SEP", + ']': "_decode_V_ARRAY_END", + '}': "_decode_V_OBJECT_END", } func (self *_ValueDecoder) compile() { - self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP - self.Emit("MOVQ", _BP, jit.Ptr(_SP, 
_VD_offs)) // MOVQ BP, _VD_offs(SP) - self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP - - /* initialize the state machine */ - self.Emit("XORL", _CX, _CX) // XORL CX, CX - self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df - /* initialize digital buffer first */ - self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap - self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX - self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf - /* add ST offset */ - self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] - self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] - self.Sjmp("JMP" , "_next") // JMP _next - - /* set the value from previous round */ - self.Link("_set_value") // _set_value: - self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error - self.Emit("XORL" , _SI, _SI) // XORL SI, SI - self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI - self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) - self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) - - /* check for value stack */ - self.Link("_next") // _next: - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_return") // JS _return - - /* fast path: test up to 4 characters manually */ - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , "_decode_fast") // JA _decode_fast - self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX - self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - - /* at least 1 to 3 spaces */ - for i := 0; i < 3; i++ { - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' - self.Sjmp("JA" , "_decode_fast") // JA _decode_fast - self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX - self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - } - - /* at least 4 spaces */ - self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL - self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF - self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX - - /* fast path: use lookup table to select decoder */ - self.Link("_decode_fast") // _decode_fast: - self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI - self.Sref("_decode_tab", 4) // .... 
&_decode_tab - self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX - self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_decode_native") // JZ _decode_native - self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC - self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX - self.Rjmp("JMP" , _AX) // JMP AX - - /* decode with native decoder */ - self.Link("_decode_native") // _decode_native: - self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI - self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI - self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX - self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX - self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8 - self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8 - self.callc(_F_value) // CALL value - self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC - - /* check for errors */ - self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_parsing_error") - self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype - self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max - self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype - - /* jump table selector */ - self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI - self.Sref("_switch_table", 4) // .... &_switch_table - self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX - self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX - self.Rjmp("JMP" , _AX) // JMP AX - - /** V_EOF **/ - self.Link("_decode_V_EOF") // _decode_V_EOF: - self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP - self.Sjmp("JMP" , "_error") // JMP _error - - /** V_NULL **/ - self.Link("_decode_V_NULL") // _decode_V_NULL: - self.Emit("XORL", _R8, _R8) // XORL R8, R8 - self.Emit("XORL", _R9, _R9) // XORL R9, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_TRUE **/ - self.Link("_decode_V_TRUE") // _decode_V_TRUE: - self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 - // TODO: maybe modified by users? 
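
The selector just above (`LEAQ ?(PC), DI`, `Sref("_switch_table", 4)`, then an indexed `MOVLQSX` and `JMP AX`) is a computed jump: `_switch_table`, emitted at the end of `compile`, is an array of 32-bit self-relative offsets, one per `types.ValueType`, and the decoder jumps straight to the handler for whatever token the native scanner returned. Go source has no computed goto, so a rough equivalent (token names and values here are illustrative stand-ins, not sonic's real constants) is a function table:

package main

import "fmt"

// Token kinds as the native scanner might report them (illustrative).
const (
	vNull = iota + 1
	vTrue
	vFalse
	vDouble
	vMax
)

// handlers plays the role of _switch_table: instead of 32-bit PC-relative
// offsets patched in by Sref, a fixed array of funcs indexed by token kind.
var handlers = [vMax]func() interface{}{
	vNull:   func() interface{} { return nil },
	vTrue:   func() interface{} { return true },
	vFalse:  func() interface{} { return false },
	vDouble: func() interface{} { return 1.5 },
}

func dispatch(vt int) interface{} {
	// Mirrors the JS/JZ and CMPQ _V_max/JA checks guarding the indexed jump.
	if vt <= 0 || vt >= vMax || handlers[vt] == nil {
		panic(fmt.Sprintf("invalid value type: %d", vt))
	}
	return handlers[vt]()
}

func main() { fmt.Println(dispatch(vDouble)) }
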
- self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_FALSE **/ - self.Link("_decode_V_FALSE") // _decode_V_FALSE: - self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 - self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 - self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_ARRAY **/ - self.Link("_decode_V_ARRAY") // _decode_V_ARRAY - self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char - - /* create a new array */ - self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX - self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX - self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX - self.call_go(_F_makeslice) // CALL_GO runtime.makeslice - - /* pack into an interface */ - self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX - self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX - self.call_go(_F_convTslice) // CALL_GO runtime.convTslice - self.Emit("MOVQ", _AX, _R8) // MOVQ AX, R8 - - /* replace current state with an array */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] - self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX - self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) - self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) - - /* add a new slot for the first element */ - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow - self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] - self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_OBJECT **/ - self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: - self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX - self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char - self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_0, ST.Vt[CX] - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX - self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI) - self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_STRING **/ - self.Link("_decode_V_STRING") // _decode_V_STRING: - self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX - self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX - 
self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX - - /* check for escapes */ - self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 - self.Sjmp("JNE" , "_unquote") // JNE _unquote - self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX - self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 - self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI - self.Sref("_copy_string_end", 4) - self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) - self.Sjmp("JC", "copy_string") - self.Link("_copy_string_end") - self.Emit("XORL", _DX, _DX) - - /* strings with no escape sequences */ - self.Link("_noescape") // _noescape: - self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI - self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI - self.Sjmp("JC" , "_object_key") // JC _object_key - - /* check for pre-packed strings, avoid 1 allocation */ - self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX - self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str - self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX - self.Emit("MOVQ" , _R8, _AX) // MOVQ R8, AX - self.call_go(_F_convTstring) // CALL_GO runtime.convTstring - self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 - - /* packed string already in R9 */ - self.Link("_packed_str") // _packed_str: - self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 - self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI - self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* the string is an object key, get the map */ - self.Link("_object_key") - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - - /* add a new delimiter */ - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] - - /* add a new slot int the map */ - self.Emit("MOVQ", _AX, _DI) // MOVQ AX, DI - self.Emit("MOVQ", _T_map, _AX) // MOVQ _T_map, AX - self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX - self.Emit("MOVQ", _R8, _CX) // MOVQ R9, CX - self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr - - /* add to the pointer stack */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* allocate memory to store the string header and unquoted result */ - self.Link("_unquote") // _unquote: - self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX - self.Emit("MOVQ", _T_byte, _BX) // MOVQ _T_byte, BX - self.Emit("MOVB", jit.Imm(0), _CX) // MOVB $0, CX - self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc - self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 - - /* prepare the unquoting parameters */ - self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX - self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI - self.Emit("NEGQ" , _CX) // NEGQ CX - self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI - self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX - self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX 
- self.Emit("XORL" , _R8, _R8) // XORL R8, R8 - self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv - self.Emit("SETCC", _R8) // SETCC R8 - self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 - - /* unquote the string, with R9 been preserved */ - self.Emit("MOVQ", _R9, _VAR_R9) // SAVE R9 - self.call_c(_F_unquote) // CALL unquote - self.Emit("MOVQ", _VAR_R9, _R9) // LOAD R9 - - /* check for errors */ - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_unquote_error") // JS _unquote_error - self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX - self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8 - self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) - self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) - self.Sjmp("JMP" , "_noescape") // JMP _noescape - - /** V_DOUBLE **/ - self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: - self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df - self.Sjmp("JC" , "_use_number") // JC _use_number - self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 - self.Sjmp("JMP" , "_use_float64") // JMP _use_float64 - - /** V_INTEGER **/ - self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: - self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df - self.Sjmp("JC" , "_use_number") // JC _use_number - self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df - self.Sjmp("JC" , "_use_int64") // JC _use_int64 - //TODO: use ss.Dv directly - self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 - - /* represent numbers as `float64` */ - self.Link("_use_float64") // _use_float64: - self.Emit("MOVQ" , _X0, _AX) // MOVQ X0, AX - self.call_go(_F_convT64) // CALL_GO runtime.convT64 - self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8 - self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 - self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* represent numbers as `json.Number` */ - self.Link("_use_number") // _use_number - self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX - self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI - self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX - self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX - self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX - self.Emit("MOVQ", _CX, _BX) // MOVQ CX, BX - self.call_go(_F_convTstring) // CALL_GO runtime.convTstring - self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 - self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 - self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /* represent numbers as `int64` */ - self.Link("_use_int64") // _use_int64: - self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX - self.call_go(_F_convT64) // CALL_GO runtime.convT64 - self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 - self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 - self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI - self.Sjmp("JMP" , "_set_value") // JMP _set_value - - /** V_KEY_SEP **/ - self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] - self.Emit("MOVQ", jit.Imm(_S_obj), 
jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1] - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_ELEM_SEP **/ - self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: - self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) - self.Sjmp("JE" , "_array_sep") // JZ _next - self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) - self.Sjmp("JMP" , "_next") // JMP _next - - /* arrays */ - self.Link("_array_sep") - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX - self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) - self.Sjmp("JAE" , "_array_more") // JAE _array_more - - /* add a slot for the new element */ - self.Link("_array_append") // _array_append: - self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) - self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI - self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX - self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} - self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow - self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX - self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI - self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp - self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] - self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_ARRAY_END **/ - self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: - self.Emit("XORL", _DX, _DX) // XORL DX, DX - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 - self.Sjmp("JE" , "_first_item") // JE _first_item - self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr - self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char - self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* first element of an array */ - self.Link("_first_item") // _first_item: - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1] - self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] - self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) - self.Sjmp("JMP" , "_next") // JMP _next - - /** V_OBJECT_END **/ - self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: - self.Emit("MOVL", jit.Imm(_S_omask_end), _DI) // MOVL _S_omask, DI - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX - self.Emit("BTQ" , _AX, _DI) - self.Sjmp("JNC" , "_invalid_char") // JNE _invalid_char - 
self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp - self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] - self.Sjmp("JMP" , "_next") // JMP _next - - /* return from decoder */ - self.Link("_return") // _return: - self.Emit("XORL", _EP, _EP) // XORL EP, EP - self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] - self.Link("_epilogue") // _epilogue: - self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST - self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP - self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP - self.Emit("RET") // RET - - /* array expand */ - self.Link("_array_more") // _array_more: - self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX - self.Emit("MOVQ" , jit.Ptr(_SI, 0), _BX) // MOVQ (SI), BX - self.Emit("MOVQ" , jit.Ptr(_SI, 8), _CX) // MOVQ 8(SI), CX - self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DI) // MOVQ 16(SI), DI - self.Emit("MOVQ" , _DI, _SI) // MOVQ DI, 24(SP) - self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI - self.call_go(_F_growslice) // CALL_GO runtime.growslice - self.Emit("MOVQ" , _AX, _DI) // MOVQ AX, DI - self.Emit("MOVQ" , _BX, _DX) // MOVQ BX, DX - self.Emit("MOVQ" , _CX, _AX) // MOVQ CX, AX - - /* update the slice */ - self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX - self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI - self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI - self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) - self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX) - self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI) - self.Sjmp("JMP" , "_array_append") // JMP _array_append - - /* copy string */ - self.Link("copy_string") // pointer: R8, length: AX, return addr: DI - self.Emit("MOVQ", _R8, _VAR_cs_p) - self.Emit("MOVQ", _AX, _VAR_cs_n) - self.Emit("MOVQ", _DI, _VAR_cs_LR) - self.Emit("MOVQ", _AX, _BX) - self.Emit("MOVQ", _AX, _CX) - self.Emit("MOVQ", _T_byte, _AX) - self.call_go(_F_makeslice) - self.Emit("MOVQ", _AX, _VAR_cs_d) - self.Emit("MOVQ", _VAR_cs_p, _BX) - self.Emit("MOVQ", _VAR_cs_n, _CX) - self.call_go(_F_memmove) - self.Emit("MOVQ", _VAR_cs_d, _R8) - self.Emit("MOVQ", _VAR_cs_n, _AX) - self.Emit("MOVQ", _VAR_cs_LR, _DI) - self.Rjmp("JMP", _DI) - - /* error handlers */ - self.Link("_stack_overflow") - self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_vtype_error") // _vtype_error: - self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC - self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_invalid_char") // _invalid_char: - self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC - self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP - self.Sjmp("JMP" , "_error") // JMP _error - self.Link("_unquote_error") // _unquote_error: - self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC - self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC - self.Link("_parsing_error") // _parsing_error: - self.Emit("NEGQ" , _AX) // NEGQ AX - self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP - self.Link("_error") // _error: - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) - self.Sjmp("JMP" , "_epilogue") // JMP _epilogue - - /* invalid value type, never returns */ - self.Link("_invalid_vtype") - self.call_go(_F_invalid_vtype) // CALL invalid_type - 
self.Emit("UD2") // UD2 - - /* switch jump table */ - self.Link("_switch_table") // _switch_table: - self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 - self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 - self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 - self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 - self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 - self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 - self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 - self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 - self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 - self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 - self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 - self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 - self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 - - /* fast character lookup table */ - self.Link("_decode_tab") // _decode_tab: - self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 - - /* generate rest of the tabs */ - for i := 1; i < 256; i++ { - if to, ok := _R_tab[i]; ok { - self.Sref(to, -int64(i) * 4) - } else { - self.Byte(0x00, 0x00, 0x00, 0x00) - } - } + self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP + + /* initialize the state machine */ + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df + /* initialize digital buffer first */ + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf + /* add ST offset */ + self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] + self.Sjmp("JMP", "_next") // JMP _next + + /* set the value from previous round */ + self.Link("_set_value") // _set_value: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_vtype_error") // JNC _vtype_error + self.Emit("XORL", _SI, _SI) // XORL SI, SI + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI + self.Emit("MOVQ", _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) + self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) + + /* check for value stack */ + self.Link("_next") // _next: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_return") // JS _return + + /* fast path: test up to 4 characters manually */ + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("MOVQ", jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("CMPQ", _AX, 
jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", "_decode_fast") // JA _decode_fast + self.Emit("BTQ", _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC", "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + + /* at least 1 to 3 spaces */ + for i := 0; i < 3; i++ { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ", _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA", "_decode_fast") // JA _decode_fast + self.Emit("BTQ", _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC", "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + } + + /* at least 4 spaces */ + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE", "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + + /* fast path: use lookup table to select decoder */ + self.Link("_decode_fast") // _decode_fast: + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_decode_tab", 4) // .... &_decode_tab + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_decode_native") // JZ _decode_native + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP", _AX) // JMP AX + + /* decode with native decoder */ + self.Link("_decode_native") // _decode_native: + self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX + self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX + self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8 + self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8 + self.callc(_F_value) // CALL value + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + + /* check for errors */ + self.Emit("MOVQ", _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_parsing_error") + self.Sjmp("JZ", "_invalid_vtype") // JZ _invalid_vtype + self.Emit("CMPQ", _AX, _V_max) // CMPQ AX, _V_max + self.Sjmp("JA", "_invalid_vtype") // JA _invalid_vtype + + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table", 4) // .... &_switch_table + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX + self.Emit("ADDQ", _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP", _AX) // JMP AX + + /** V_EOF **/ + self.Link("_decode_V_EOF") // _decode_V_EOF: + self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP + self.Sjmp("JMP", "_error") // JMP _error + + /** V_NULL **/ + self.Link("_decode_V_NULL") // _decode_V_NULL: + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("XORL", _R9, _R9) // XORL R9, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_TRUE **/ + self.Link("_decode_V_TRUE") // _decode_V_TRUE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + // TODO: maybe modified by users? 
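
`_decode_V_TRUE` loads a type word (`_T_bool`) into R8 and a pointer to a shared boolean (`_V_true`, built by `pbool`) into R9, and `_set_value` then stores the pair straight into the parent slot; that pair is exactly the two-word layout of an empty interface, and reusing the shared value means decoding `true` allocates nothing. The TODO above flags the catch: every decoded `true` points at the same allocation. A sketch of the layout being written (field names are descriptive, not the runtime's):

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the two-word runtime layout of interface{}: a type word
// and a data pointer. _set_value writes R8 into the first word and R9
// into the second.
type eface struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

// sharedTrue stands in for the pbool(true) singleton that _V_true points at.
var sharedTrue = true

func main() {
	var v interface{} = false
	e := (*eface)(unsafe.Pointer(&v))
	// Repoint the data word at the shared boolean, as the V_TRUE handler
	// does; the type word (bool) is already in place.
	e.data = unsafe.Pointer(&sharedTrue)
	fmt.Println(v) // true
}
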
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_FALSE **/ + self.Link("_decode_V_FALSE") // _decode_V_FALSE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_ARRAY **/ + self.Link("_decode_V_ARRAY") // _decode_V_ARRAY + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_invalid_char") // JNC _invalid_char + + /* create a new array */ + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX + self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX + self.call_go(_F_makeslice) // CALL_GO runtime.makeslice + + /* pack into an interface */ + self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX + self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX + self.call_go(_F_convTslice) // CALL_GO runtime.convTslice + self.Emit("MOVQ", _AX, _R8) // MOVQ AX, R8 + + /* replace current state with an array */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] + self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) + self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) + + /* add a new slot for the first element */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /** V_OBJECT **/ + self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC", "_invalid_char") // JNC _invalid_char + self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_0, ST.Vt[CX] + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI) + self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) + self.Sjmp("JMP", "_next") // JMP _next + + /** V_STRING **/ + self.Link("_decode_V_STRING") // _decode_V_STRING: + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX + self.Emit("SUBQ", 
_CX, _AX) // SUBQ CX, AX + + /* check for escapes */ + self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 + self.Sjmp("JNE", "_unquote") // JNE _unquote + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 + self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI + self.Sref("_copy_string_end", 4) + self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) + self.Sjmp("JC", "copy_string") + self.Link("_copy_string_end") + self.Emit("XORL", _DX, _DX) + + /* strings with no escape sequences */ + self.Link("_noescape") // _noescape: + self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI + self.Emit("BTQ", _SI, _DI) // BTQ SI, DI + self.Sjmp("JC", "_object_key") // JC _object_key + + /* check for pre-packed strings, avoid 1 allocation */ + self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX + self.Sjmp("JNZ", "_packed_str") // JNZ _packed_str + self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX + self.Emit("MOVQ", _R8, _AX) // MOVQ R8, AX + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + + /* packed string already in R9 */ + self.Link("_packed_str") // _packed_str: + self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 + self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI + self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /* the string is an object key, get the map */ + self.Link("_object_key") + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + + /* add a new delimiter */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] + + /* add a new slot int the map */ + self.Emit("MOVQ", _AX, _DI) // MOVQ AX, DI + self.Emit("MOVQ", _T_map, _AX) // MOVQ _T_map, AX + self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX + self.Emit("MOVQ", _R8, _CX) // MOVQ R9, CX + self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr + + /* add to the pointer stack */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /* allocate memory to store the string header and unquoted result */ + self.Link("_unquote") // _unquote: + self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX + self.Emit("MOVQ", _T_byte, _BX) // MOVQ _T_byte, BX + self.Emit("MOVB", jit.Imm(0), _CX) // MOVB $0, CX + self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + + /* prepare the unquoting parameters */ + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI + self.Emit("NEGQ", _CX) // NEGQ CX + self.Emit("LEAQ", jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI + self.Emit("LEAQ", jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX + self.Emit("LEAQ", _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX + self.Emit("XORL", _R8, _R8) // 
XORL R8, R8 + self.Emit("BTQ", jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, df + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ", jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + + /* unquote the string, with R9 being preserved */ + self.Emit("MOVQ", _R9, _VAR_R9) // SAVE R9 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_R9, _R9) // LOAD R9 + + /* check for errors */ + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_unquote_error") // JS _unquote_error + self.Emit("MOVL", jit.Imm(1), _DX) // MOVL $1, DX + self.Emit("LEAQ", jit.Ptr(_R9, 16), _R8) // LEAQ 16(R9), R8 + self.Emit("MOVQ", _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) + self.Emit("MOVQ", _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) + self.Sjmp("JMP", "_noescape") // JMP _noescape + + /** V_DOUBLE **/ + self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: + self.Emit("BTQ", jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC", "_use_number") // JC _use_number + self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 + self.Sjmp("JMP", "_use_float64") // JMP _use_float64 + + /** V_INTEGER **/ + self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: + self.Emit("BTQ", jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC", "_use_number") // JC _use_number + self.Emit("BTQ", jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df + self.Sjmp("JC", "_use_int64") // JC _use_int64 + //TODO: use ss.Dv directly + self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 + + /* represent numbers as `float64` */ + self.Link("_use_float64") // _use_float64: + self.Emit("MOVQ", _X0, _AX) // MOVQ X0, AX + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ", _T_float64, _R8) // MOVQ _T_float64, R8 + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /* represent numbers as `json.Number` */ + self.Link("_use_number") // _use_number: + self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX + self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI + self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX + self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX + self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX + self.Emit("MOVQ", _CX, _BX) // MOVQ CX, BX + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /* represent numbers as `int64` */ + self.Link("_use_int64") // _use_int64: + self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP", "_set_value") // JMP _set_value + + /** V_KEY_SEP **/ + self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] + self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt-8)) // MOVQ _S_obj, ST.Vt[CX - 1] + 
self.Sjmp("JMP", "_next") // JMP _next + + /** V_ELEM_SEP **/ + self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) + self.Sjmp("JE", "_array_sep") // JZ _next + self.Emit("CMPQ", _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) + self.Sjmp("JMP", "_next") // JMP _next + + /* arrays */ + self.Link("_array_sep") + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX + self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) + self.Sjmp("JAE", "_array_more") // JAE _array_more + + /* add a slot for the new element */ + self.Link("_array_append") // _array_append: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) + self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE", "_stack_overflow") // JA _stack_overflow + self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX + self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(7, _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} + self.Sjmp("JMP", "_next") // JMP _next + + /** V_ARRAY_END **/ + self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 + self.Sjmp("JE", "_first_item") // JE _first_item + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr + self.Sjmp("JNE", "_invalid_char") // JNE _invalid_char + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /* first element of an array */ + self.Link("_first_item") // _first_item: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp-8), _SI) // MOVQ ST.Vp[CX - 1], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp-8)) // MOVQ DX, ST.Vp[CX - 1] + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Sjmp("JMP", "_next") // JMP _next + + /** V_OBJECT_END **/ + self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: + self.Emit("MOVL", jit.Imm(_S_omask_end), _DI) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ", _AX, _DI) + self.Sjmp("JNC", "_invalid_char") // JNE _invalid_char + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) 
// SUBQ $1, ST.Sp + self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP", "_next") // JMP _next + + /* return from decoder */ + self.Link("_return") // _return: + self.Emit("XORL", _EP, _EP) // XORL EP, EP + self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] + self.Link("_epilogue") // _epilogue: + self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST + self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP + self.Emit("RET") // RET + + /* array expand */ + self.Link("_array_more") // _array_more: + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ", jit.Ptr(_SI, 0), _BX) // MOVQ (SI), BX + self.Emit("MOVQ", jit.Ptr(_SI, 8), _CX) // MOVQ 8(SI), CX + self.Emit("MOVQ", jit.Ptr(_SI, 16), _DI) // MOVQ 16(SI), DI + self.Emit("MOVQ", _DI, _SI) // MOVQ DI, SI + self.Emit("SHLQ", jit.Imm(1), _SI) // SHLQ $1, SI + self.call_go(_F_growslice) // CALL_GO runtime.growslice + self.Emit("MOVQ", _AX, _DI) // MOVQ AX, DI + self.Emit("MOVQ", _BX, _DX) // MOVQ BX, DX + self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX + + /* update the slice */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(SI) + self.WriteRecNotAX(8, _DI, jit.Ptr(_SI, 0), false) // MOVQ DI, (SI) + self.Sjmp("JMP", "_array_append") // JMP _array_append + + /* copy string */ + self.Link("copy_string") // pointer: R8, length: AX, return addr: DI + self.Emit("MOVQ", _R8, _VAR_cs_p) + self.Emit("MOVQ", _AX, _VAR_cs_n) + self.Emit("MOVQ", _DI, _VAR_cs_LR) + self.Emit("MOVQ", _AX, _BX) + self.Emit("MOVQ", _AX, _CX) + self.Emit("MOVQ", _T_byte, _AX) + self.call_go(_F_makeslice) + self.Emit("MOVQ", _AX, _VAR_cs_d) + self.Emit("MOVQ", _VAR_cs_p, _BX) + self.Emit("MOVQ", _VAR_cs_n, _CX) + self.call_go(_F_memmove) + self.Emit("MOVQ", _VAR_cs_d, _R8) + self.Emit("MOVQ", _VAR_cs_n, _AX) + self.Emit("MOVQ", _VAR_cs_LR, _DI) + self.Rjmp("JMP", _DI) + + /* error handlers */ + self.Link("_stack_overflow") + self.Emit("MOVL", _E_recurse, _EP) // MOVL _E_recurse, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_vtype_error") // _vtype_error: + self.Emit("MOVQ", _DI, _IC) // MOVQ DI, IC + self.Emit("MOVL", _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_invalid_char") // _invalid_char: + self.Emit("SUBQ", jit.Imm(1), _IC) // SUBQ $1, IC + self.Emit("MOVL", _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP", "_error") // JMP _error + self.Link("_unquote_error") // _unquote_error: + self.Emit("MOVQ", _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC + self.Emit("SUBQ", jit.Imm(1), _IC) // SUBQ $1, IC + self.Link("_parsing_error") // _parsing_error: + self.Emit("NEGQ", _AX) // NEGQ AX + self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP + self.Link("_error") // _error: + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Sjmp("JMP", "_epilogue") // JMP _epilogue + + /* invalid value type, never returns */ + self.Link("_invalid_vtype") + self.call_go(_F_invalid_vtype) // CALL invalid_vtype + self.Emit("UD2") // UD2 + + /* switch jump table */ + self.Link("_switch_table") // _switch_table: + self.Sref("_decode_V_EOF", 0) // SREF 
&_decode_V_EOF, $0 + self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 + self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 + self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 + self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 + self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 + self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 + self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 + self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 + self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 + self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 + self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 + self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 + + /* fast character lookup table */ + self.Link("_decode_tab") // _decode_tab: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + + /* generate rest of the tabs */ + for i := 1; i < 256; i++ { + if to, ok := _R_tab[i]; ok { + self.Sref(to, -int64(i)*4) + } else { + self.Byte(0x00, 0x00, 0x00, 0x00) + } + } } func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) { - self.Emit("MOVQ", _V_writeBarrier, _R9) - self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.call(_F_gcWriteBarrierAX) - if saveDI { - self.load(_DI) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", _AX, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) { - if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { - panic("rec contains AX!") - } - self.Emit("MOVQ", _V_writeBarrier, _AX) - self.Emit("CMPL", jit.Ptr(_AX, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, _AX) - if saveDI { - self.save(_DI) - } - self.Emit("LEAQ", rec, _DI) - self.call(_F_gcWriteBarrierAX) - if saveDI { - self.load(_DI) - } - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _AX) + self.Emit("CMPL", jit.Ptr(_AX, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Emit("MOVQ", ptr, _AX) + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" 
+ strconv.Itoa(i) + "_{n}") } /** Generic Decoder **/ var ( - _subr_decode_value = new(_ValueDecoder).build() + _subr_decode_value = new(_ValueDecoder).build() ) //go:nosplit func invalid_vtype(vt types.ValueType) { - throw(fmt.Sprintf("invalid value type: %d", vt)) + throw(fmt.Sprintf("invalid value type: %d", vt)) } diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/pools.go b/vendor/github.com/bytedance/sonic/internal/decoder/pools.go index 06adc7fa1..4a81e0b9f 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/pools.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/pools.go @@ -17,127 +17,127 @@ package decoder import ( - `sync` - `unsafe` + "sync" + "unsafe" - `github.com/bytedance/sonic/internal/caching` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/caching" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) const ( - _MinSlice = 2 - _MaxStack = 4096 // 4k slots - _MaxStackBytes = _MaxStack * _PtrBytes - _MaxDigitNums = 800 // used in atof fallback algorithm + _MinSlice = 2 + _MaxStack = 4096 // 4k slots + _MaxStackBytes = _MaxStack * _PtrBytes + _MaxDigitNums = 800 // used in atof fallback algorithm ) const ( - _PtrBytes = _PTR_SIZE / 8 - _FsmOffset = (_MaxStack + 1) * _PtrBytes - _DbufOffset = _FsmOffset + int64(unsafe.Sizeof(types.StateMachine{})) + types.MAX_RECURSE * _PtrBytes - _StackSize = unsafe.Sizeof(_Stack{}) + _PtrBytes = _PTR_SIZE / 8 + _FsmOffset = (_MaxStack + 1) * _PtrBytes + _DbufOffset = _FsmOffset + int64(unsafe.Sizeof(types.StateMachine{})) + types.MAX_RECURSE*_PtrBytes + _StackSize = unsafe.Sizeof(_Stack{}) ) var ( - stackPool = sync.Pool{} - valueCache = []unsafe.Pointer(nil) - fieldCache = []*caching.FieldMap(nil) - fieldCacheMux = sync.Mutex{} - programCache = caching.CreateProgramCache() + stackPool = sync.Pool{} + valueCache = []unsafe.Pointer(nil) + fieldCache = []*caching.FieldMap(nil) + fieldCacheMux = sync.Mutex{} + programCache = caching.CreateProgramCache() ) type _Stack struct { - sp uintptr - sb [_MaxStack]unsafe.Pointer - mm types.StateMachine - vp [types.MAX_RECURSE]unsafe.Pointer - dp [_MaxDigitNums]byte + sp uintptr + sb [_MaxStack]unsafe.Pointer + mm types.StateMachine + vp [types.MAX_RECURSE]unsafe.Pointer + dp [_MaxDigitNums]byte } type _Decoder func( - s string, - i int, - vp unsafe.Pointer, - sb *_Stack, - fv uint64, - sv string, // DO NOT pass value to this arguement, since it is only used for local _VAR_sv - vk unsafe.Pointer, // DO NOT pass value to this arguement, since it is only used for local _VAR_vk + s string, + i int, + vp unsafe.Pointer, + sb *_Stack, + fv uint64, + sv string, // DO NOT pass value to this arguement, since it is only used for local _VAR_sv + vk unsafe.Pointer, // DO NOT pass value to this arguement, since it is only used for local _VAR_vk ) (int, error) var _KeepAlive struct { - s string - i int - vp unsafe.Pointer - sb *_Stack - fv uint64 - sv string - vk unsafe.Pointer - - ret int - err error - - frame_decoder [_FP_offs]byte - frame_generic [_VD_offs]byte + s string + i int + vp unsafe.Pointer + sb *_Stack + fv uint64 + sv string + vk unsafe.Pointer + + ret int + err error + + frame_decoder [_FP_offs]byte + frame_generic [_VD_offs]byte } var ( - argPtrs = []bool{true, false, false, true, true, false, true, false, true} - localPtrs = []bool{} + argPtrs = []bool{true, false, false, true, true, false, true, false, true} + localPtrs = []bool{} ) 
var ( - argPtrs_generic = []bool{true} - localPtrs_generic = []bool{} + argPtrs_generic = []bool{true} + localPtrs_generic = []bool{} ) func newStack() *_Stack { - if ret := stackPool.Get(); ret == nil { - return new(_Stack) - } else { - return ret.(*_Stack) - } + if ret := stackPool.Get(); ret == nil { + return new(_Stack) + } else { + return ret.(*_Stack) + } } func resetStack(p *_Stack) { - memclrNoHeapPointers(unsafe.Pointer(p), _StackSize) + memclrNoHeapPointers(unsafe.Pointer(p), _StackSize) } func freeStack(p *_Stack) { - p.sp = 0 - stackPool.Put(p) + p.sp = 0 + stackPool.Put(p) } func freezeValue(v unsafe.Pointer) uintptr { - valueCache = append(valueCache, v) - return uintptr(v) + valueCache = append(valueCache, v) + return uintptr(v) } func freezeFields(v *caching.FieldMap) int64 { - fieldCacheMux.Lock() - fieldCache = append(fieldCache, v) - fieldCacheMux.Unlock() - return referenceFields(v) + fieldCacheMux.Lock() + fieldCache = append(fieldCache, v) + fieldCacheMux.Unlock() + return referenceFields(v) } func referenceFields(v *caching.FieldMap) int64 { - return int64(uintptr(unsafe.Pointer(v))) + return int64(uintptr(unsafe.Pointer(v))) } func makeDecoder(vt *rt.GoType, _ ...interface{}) (interface{}, error) { - if pp, err := newCompiler().compile(vt.Pack()); err != nil { - return nil, err - } else { - return newAssembler(pp).Load(), nil - } + if pp, err := newCompiler().compile(vt.Pack()); err != nil { + return nil, err + } else { + return newAssembler(pp).Load(), nil + } } func findOrCompile(vt *rt.GoType) (_Decoder, error) { - if val := programCache.Get(vt); val != nil { - return val.(_Decoder), nil - } else if ret, err := programCache.Compute(vt, makeDecoder); err == nil { - return ret.(_Decoder), nil - } else { - return nil, err - } -} \ No newline at end of file + if val := programCache.Get(vt); val != nil { + return val.(_Decoder), nil + } else if ret, err := programCache.Compute(vt, makeDecoder); err == nil { + return ret.(_Decoder), nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/primitives.go b/vendor/github.com/bytedance/sonic/internal/decoder/primitives.go index d6053e2cb..79517e68c 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/primitives.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/primitives.go @@ -17,30 +17,30 @@ package decoder import ( - `encoding` - `encoding/json` - `unsafe` + "encoding" + "encoding/json" + "unsafe" - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/rt" ) func decodeTypedPointer(s string, i int, vt *rt.GoType, vp unsafe.Pointer, sb *_Stack, fv uint64) (int, error) { - if fn, err := findOrCompile(vt); err != nil { - return 0, err - } else { - rt.MoreStack(_FP_size + _VD_size + native.MaxFrameSize) - rt.StopProf() - ret, err := fn(s, i, vp, sb, fv, "", nil) - rt.StartProf() - return ret, err - } + if fn, err := findOrCompile(vt); err != nil { + return 0, err + } else { + rt.MoreStack(_FP_size + _VD_size + native.MaxFrameSize) + rt.StopProf() + ret, err := fn(s, i, vp, sb, fv, "", nil) + rt.StartProf() + return ret, err + } } func decodeJsonUnmarshaler(vv interface{}, s string) error { - return vv.(json.Unmarshaler).UnmarshalJSON(rt.Str2Mem(s)) + return vv.(json.Unmarshaler).UnmarshalJSON(rt.Str2Mem(s)) } func decodeTextUnmarshaler(vv interface{}, s string) error { - return vv.(encoding.TextUnmarshaler).UnmarshalText(rt.Str2Mem(s)) + 
return vv.(encoding.TextUnmarshaler).UnmarshalText(rt.Str2Mem(s)) } diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/stream.go b/vendor/github.com/bytedance/sonic/internal/decoder/stream.go index e1e0f73b9..aa9ee9791 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/stream.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/stream.go @@ -17,201 +17,200 @@ package decoder import ( - `bytes` - `io` - `sync` + "bytes" + "io" + "sync" - `github.com/bytedance/sonic/option` - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/option" ) var ( - minLeftBufferShift uint = 1 + minLeftBufferShift uint = 1 ) // StreamDecoder is the decoder context object for streaming input. type StreamDecoder struct { - r io.Reader - buf []byte - scanp int - scanned int64 - err error - Decoder + r io.Reader + buf []byte + scanp int + scanned int64 + err error + Decoder } var bufPool = sync.Pool{ - New: func () interface{} { - return make([]byte, 0, option.DefaultDecoderBufferSize) - }, + New: func() interface{} { + return make([]byte, 0, option.DefaultDecoderBufferSize) + }, } // NewStreamDecoder adapts to encoding/json.NewDecoder API. // // NewStreamDecoder returns a new decoder that reads from r. func NewStreamDecoder(r io.Reader) *StreamDecoder { - return &StreamDecoder{r : r} + return &StreamDecoder{r: r} } -// Decode decodes input stream into val with corresponding data. +// Decode decodes input stream into val with corresponding data. // Redundantly bytes may be read and left in its buffer, and can be used at next call. -// Either io error from underlying io.Reader (except io.EOF) +// Either io error from underlying io.Reader (except io.EOF) // or syntax error from data will be recorded and stop subsequently decoding. func (self *StreamDecoder) Decode(val interface{}) (err error) { - if self.err != nil { - return self.err - } - - var buf = self.buf[self.scanp:] - var p = 0 - var recycle bool - if cap(buf) == 0 { - buf = bufPool.Get().([]byte) - recycle = true - } - - var first = true - var repeat = true + if self.err != nil { + return self.err + } + + var buf = self.buf[self.scanp:] + var p = 0 + var recycle bool + if cap(buf) == 0 { + buf = bufPool.Get().([]byte) + recycle = true + } + + var first = true + var repeat = true read_more: - for { - l := len(buf) - realloc(&buf) - n, err := self.r.Read(buf[l:cap(buf)]) - buf = buf[:l+n] - if err != nil { - repeat = false - if err == io.EOF { - if len(buf) == 0 { - return err - } - break - } - self.err = err - return err - } - if n > 0 || first { - break - } - } - first = false - - l := len(buf) - if l > 0 { - self.Decoder.Reset(string(buf)) - err = self.Decoder.Decode(val) - if err != nil { - if repeat && self.repeatable(err) { - goto read_more - } - self.err = err - } - - p = self.Decoder.Pos() - self.scanned += int64(p) - self.scanp = 0 - } - - if l > p { - // remain undecoded bytes, so copy them into self.buf - self.buf = append(self.buf[:0], buf[p:]...) 
- } else { - self.buf = nil - recycle = true - } - - if recycle { - buf = buf[:0] - bufPool.Put(buf) - } - return err + for { + l := len(buf) + realloc(&buf) + n, err := self.r.Read(buf[l:cap(buf)]) + buf = buf[:l+n] + if err != nil { + repeat = false + if err == io.EOF { + if len(buf) == 0 { + return err + } + break + } + self.err = err + return err + } + if n > 0 || first { + break + } + } + first = false + + l := len(buf) + if l > 0 { + self.Decoder.Reset(string(buf)) + err = self.Decoder.Decode(val) + if err != nil { + if repeat && self.repeatable(err) { + goto read_more + } + self.err = err + } + + p = self.Decoder.Pos() + self.scanned += int64(p) + self.scanp = 0 + } + + if l > p { + // remain undecoded bytes, so copy them into self.buf + self.buf = append(self.buf[:0], buf[p:]...) + } else { + self.buf = nil + recycle = true + } + + if recycle { + buf = buf[:0] + bufPool.Put(buf) + } + return err } func (self StreamDecoder) repeatable(err error) bool { - if ee, ok := err.(SyntaxError); ok && - (ee.Code == types.ERR_EOF || (ee.Code == types.ERR_INVALID_CHAR && self.i >= len(self.s)-1)) { - return true - } - return false + if ee, ok := err.(SyntaxError); ok && + (ee.Code == types.ERR_EOF || (ee.Code == types.ERR_INVALID_CHAR && self.i >= len(self.s)-1)) { + return true + } + return false } -// InputOffset returns the input stream byte offset of the current decoder position. +// InputOffset returns the input stream byte offset of the current decoder position. // The offset gives the location of the end of the most recently returned token and the beginning of the next token. func (self *StreamDecoder) InputOffset() int64 { - return self.scanned + int64(self.scanp) + return self.scanned + int64(self.scanp) } -// Buffered returns a reader of the data remaining in the Decoder's buffer. +// Buffered returns a reader of the data remaining in the Decoder's buffer. // The reader is valid until the next call to Decode. func (self *StreamDecoder) Buffered() io.Reader { - return bytes.NewReader(self.buf[self.scanp:]) + return bytes.NewReader(self.buf[self.scanp:]) } // More reports whether there is another element in the // current array or object being parsed. func (self *StreamDecoder) More() bool { - if self.err != nil { - return false - } - c, err := self.peek() - return err == nil && c != ']' && c != '}' + if self.err != nil { + return false + } + c, err := self.peek() + return err == nil && c != ']' && c != '}' } func (self *StreamDecoder) peek() (byte, error) { - var err error - for { - for i := self.scanp; i < len(self.buf); i++ { - c := self.buf[i] - if isSpace(c) { - continue - } - self.scanp = i - return c, nil - } - // buffer has been scanned, now report any error - if err != nil { - if err != io.EOF { - self.err = err - } - return 0, err - } - err = self.refill() - } + var err error + for { + for i := self.scanp; i < len(self.buf); i++ { + c := self.buf[i] + if isSpace(c) { + continue + } + self.scanp = i + return c, nil + } + // buffer has been scanned, now report any error + if err != nil { + if err != io.EOF { + self.err = err + } + return 0, err + } + err = self.refill() + } } func isSpace(c byte) bool { - return types.SPACE_MASK & (1 << c) != 0 + return types.SPACE_MASK&(1<<c) != 0 } func (self *StreamDecoder) refill() error { - // Make room to read more into the buffer. - // First slide down data already consumed. 
- if self.scanp > 0 { - self.scanned += int64(self.scanp) - n := copy(self.buf, self.buf[self.scanp:]) - self.buf = self.buf[:n] - self.scanp = 0 - } - - // Grow buffer if not large enough. - realloc(&self.buf) - - // Read. Delay error for next iteration (after scan). - n, err := self.r.Read(self.buf[len(self.buf):cap(self.buf)]) - self.buf = self.buf[0 : len(self.buf)+n] - - return err + // Make room to read more into the buffer. + // First slide down data already consumed. + if self.scanp > 0 { + self.scanned += int64(self.scanp) + n := copy(self.buf, self.buf[self.scanp:]) + self.buf = self.buf[:n] + self.scanp = 0 + } + + // Grow buffer if not large enough. + realloc(&self.buf) + + // Read. Delay error for next iteration (after scan). + n, err := self.r.Read(self.buf[len(self.buf):cap(self.buf)]) + self.buf = self.buf[0 : len(self.buf)+n] + + return err } func realloc(buf *[]byte) { - l := uint(len(*buf)) - c := uint(cap(*buf)) - if c - l <= c >> minLeftBufferShift { - e := l+(l>>minLeftBufferShift) - if e < option.DefaultDecoderBufferSize { - e = option.DefaultDecoderBufferSize - } - tmp := make([]byte, l, e) - copy(tmp, *buf) - *buf = tmp - } + l := uint(len(*buf)) + c := uint(cap(*buf)) + if c-l <= c>>minLeftBufferShift { + e := l + (l >> minLeftBufferShift) + if e < option.DefaultDecoderBufferSize { + e = option.DefaultDecoderBufferSize + } + tmp := make([]byte, l, e) + copy(tmp, *buf) + *buf = tmp + } } - diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go115.go b/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go115.go index 1a0917c3c..447051f33 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go115.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go115.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.20 // +build go1.15,!go1.20 /* @@ -19,12 +20,12 @@ package decoder import ( - `unsafe` - `reflect` + "reflect" + "unsafe" - _ `github.com/chenzhuoyu/base64x` + _ "github.com/chenzhuoyu/base64x" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode @@ -34,7 +35,7 @@ var _subr__b64decode uintptr const _max_map_element_size uintptr = 128 func mapfast(vt reflect.Type) bool { - return vt.Elem().Size() <= _max_map_element_size + return vt.Elem().Size() <= _max_map_element_size } //go:nosplit @@ -108,4 +109,4 @@ func memclrHasPointers(ptr unsafe.Pointer, n uintptr) //go:noescape //go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers //goland:noinspection GoUnusedParameter -func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go120.go b/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go120.go index cde6a1972..6c028c59a 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go120.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/stubs_go120.go @@ -1,3 +1,4 @@ +//go:build go1.20 // +build go1.20 /* @@ -19,12 +20,12 @@ package decoder import ( - `unsafe` - `reflect` + "reflect" + "unsafe" - _ `github.com/chenzhuoyu/base64x` + _ "github.com/chenzhuoyu/base64x" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode @@ -34,7 +35,7 @@ var _subr__b64decode uintptr const _max_map_element_size uintptr = 128 func 
mapfast(vt reflect.Type) bool { - return vt.Elem().Size() <= _max_map_element_size + return vt.Elem().Size() <= _max_map_element_size } //go:nosplit @@ -108,4 +109,4 @@ func memclrHasPointers(ptr unsafe.Pointer, n uintptr) //go:noescape //go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers //goland:noinspection GoUnusedParameter -func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/types.go b/vendor/github.com/bytedance/sonic/internal/decoder/types.go index 6fc0e706c..2d586df6c 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/types.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/types.go @@ -17,42 +17,42 @@ package decoder import ( - `encoding` - `encoding/base64` - `encoding/json` - `reflect` - `unsafe` + "encoding" + "encoding/base64" + "encoding/json" + "reflect" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) var ( - byteType = reflect.TypeOf(byte(0)) - intType = reflect.TypeOf(int(0)) - int8Type = reflect.TypeOf(int8(0)) - int16Type = reflect.TypeOf(int16(0)) - int32Type = reflect.TypeOf(int32(0)) - int64Type = reflect.TypeOf(int64(0)) - uintType = reflect.TypeOf(uint(0)) - uint8Type = reflect.TypeOf(uint8(0)) - uint16Type = reflect.TypeOf(uint16(0)) - uint32Type = reflect.TypeOf(uint32(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float32Type = reflect.TypeOf(float32(0)) - float64Type = reflect.TypeOf(float64(0)) - stringType = reflect.TypeOf("") - bytesType = reflect.TypeOf([]byte(nil)) - jsonNumberType = reflect.TypeOf(json.Number("")) - base64CorruptInputError = reflect.TypeOf(base64.CorruptInputError(0)) + byteType = reflect.TypeOf(byte(0)) + intType = reflect.TypeOf(int(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + bytesType = reflect.TypeOf([]byte(nil)) + jsonNumberType = reflect.TypeOf(json.Number("")) + base64CorruptInputError = reflect.TypeOf(base64.CorruptInputError(0)) ) var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - encodingTextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() + jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + encodingTextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() ) func rtype(t reflect.Type) (*rt.GoItab, *rt.GoType) { - p := (*rt.GoIface)(unsafe.Pointer(&t)) - return p.Itab, (*rt.GoType)(p.Value) + p := (*rt.GoIface)(unsafe.Pointer(&t)) + return p.Itab, (*rt.GoType)(p.Value) } diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/utils.go b/vendor/github.com/bytedance/sonic/internal/decoder/utils.go index 23ee5d501..81b63e161 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/utils.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/utils.go @@ -17,23 +17,23 @@ package decoder import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/loader` + 
"github.com/bytedance/sonic/loader" ) //go:nosplit func pbool(v bool) uintptr { - return freezeValue(unsafe.Pointer(&v)) + return freezeValue(unsafe.Pointer(&v)) } //go:nosplit func ptodec(p loader.Function) _Decoder { - return *(*_Decoder)(unsafe.Pointer(&p)) + return *(*_Decoder)(unsafe.Pointer(&p)) } func assert_eq(v int64, exp int64, msg string) { - if v != exp { - panic(msg) - } + if v != exp { + panic(msg) + } } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go116.go b/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go116.go index d056259f2..8179837d6 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go116.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go116.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.17 // +build go1.15,!go1.17 /* @@ -19,19 +20,19 @@ package encoder import ( - `fmt` - `reflect` - `strconv` - `unsafe` - - `github.com/bytedance/sonic/internal/cpu` - `github.com/bytedance/sonic/internal/jit` - `github.com/bytedance/sonic/internal/native/types` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/obj/x86` - - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/rt` + "fmt" + "reflect" + "strconv" + "unsafe" + + "github.com/bytedance/sonic/internal/cpu" + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native/types" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" + + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/rt" ) /** Register Allocations @@ -66,1134 +67,1137 @@ import ( */ const ( - _S_cond = iota - _S_init + _S_cond = iota + _S_init ) const ( - _FP_args = 48 // 48 bytes for passing arguments to this function - _FP_fargs = 64 // 64 bytes for passing arguments to other Go functions - _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions - _FP_locals = 24 // 24 bytes for local variables + _FP_args = 48 // 48 bytes for passing arguments to this function + _FP_fargs = 64 // 64 bytes for passing arguments to other Go functions + _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions + _FP_locals = 24 // 24 bytes for local variables ) const ( - _FP_offs = _FP_fargs + _FP_saves + _FP_locals - _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer - _FP_base = _FP_size + 8 // 8 bytes for the return address + _FP_offs = _FP_fargs + _FP_saves + _FP_locals + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address ) const ( - _FM_exp32 = 0x7f800000 - _FM_exp64 = 0x7ff0000000000000 + _FM_exp32 = 0x7f800000 + _FM_exp64 = 0x7ff0000000000000 ) const ( - _IM_null = 0x6c6c756e // 'null' - _IM_true = 0x65757274 // 'true' - _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') - _IM_open = 0x00225c22 // '"\"∅' - _IM_array = 0x5d5b // '[]' - _IM_object = 0x7d7b // '{}' - _IM_mulv = -0x5555555555555555 + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') + _IM_open = 0x00225c22 // '"\"∅' + _IM_array = 0x5d5b // '[]' + _IM_object = 0x7d7b // '{}' + _IM_mulv = -0x5555555555555555 ) const ( - _LB_more_space = "_more_space" - _LB_more_space_return = "_more_space_return_" + _LB_more_space = "_more_space" + _LB_more_space_return = "_more_space_return_" ) const ( - _LB_error = "_error" - _LB_error_too_deep 
= "_error_too_deep" - _LB_error_invalid_number = "_error_invalid_number" - _LB_error_nan_or_infinite = "_error_nan_or_infinite" - _LB_panic = "_panic" + _LB_error = "_error" + _LB_error_too_deep = "_error_too_deep" + _LB_error_invalid_number = "_error_invalid_number" + _LB_error_nan_or_infinite = "_error_nan_or_infinite" + _LB_panic = "_panic" ) var ( - _AX = jit.Reg("AX") - _CX = jit.Reg("CX") - _DX = jit.Reg("DX") - _DI = jit.Reg("DI") - _SI = jit.Reg("SI") - _BP = jit.Reg("BP") - _SP = jit.Reg("SP") - _R8 = jit.Reg("R8") + _AX = jit.Reg("AX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") ) var ( - _X0 = jit.Reg("X0") - _Y0 = jit.Reg("Y0") + _X0 = jit.Reg("X0") + _Y0 = jit.Reg("Y0") ) var ( - _ST = jit.Reg("BX") - _RP = jit.Reg("DI") - _RL = jit.Reg("SI") - _RC = jit.Reg("DX") + _ST = jit.Reg("BX") + _RP = jit.Reg("DI") + _RL = jit.Reg("SI") + _RC = jit.Reg("DX") ) var ( - _LR = jit.Reg("R9") - _R10 = jit.Reg("R10") // used for gcWriterBarrier - _ET = jit.Reg("R10") - _EP = jit.Reg("R11") + _LR = jit.Reg("R9") + _R10 = jit.Reg("R10") // used for gcWriterBarrier + _ET = jit.Reg("R10") + _EP = jit.Reg("R11") ) var ( - _SP_p = jit.Reg("R12") - _SP_q = jit.Reg("R13") - _SP_x = jit.Reg("R14") - _SP_f = jit.Reg("R15") + _SP_p = jit.Reg("R12") + _SP_q = jit.Reg("R13") + _SP_x = jit.Reg("R14") + _SP_f = jit.Reg("R15") ) var ( - _ARG_rb = jit.Ptr(_SP, _FP_base) - _ARG_vp = jit.Ptr(_SP, _FP_base + 8) - _ARG_sb = jit.Ptr(_SP, _FP_base + 16) - _ARG_fv = jit.Ptr(_SP, _FP_base + 24) + _ARG_rb = jit.Ptr(_SP, _FP_base) + _ARG_vp = jit.Ptr(_SP, _FP_base+8) + _ARG_sb = jit.Ptr(_SP, _FP_base+16) + _ARG_fv = jit.Ptr(_SP, _FP_base+24) ) var ( - _RET_et = jit.Ptr(_SP, _FP_base + 32) - _RET_ep = jit.Ptr(_SP, _FP_base + 40) + _RET_et = jit.Ptr(_SP, _FP_base+32) + _RET_ep = jit.Ptr(_SP, _FP_base+40) ) var ( - _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves) - _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) - _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) + _VAR_sp = jit.Ptr(_SP, _FP_fargs+_FP_saves) + _VAR_dn = jit.Ptr(_SP, _FP_fargs+_FP_saves+8) + _VAR_vp = jit.Ptr(_SP, _FP_fargs+_FP_saves+16) ) var ( - _REG_ffi = []obj.Addr{_RP, _RL, _RC} - _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} - _REG_jsr = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} - _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} + _REG_ffi = []obj.Addr{_RP, _RL, _RC} + _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} + _REG_jsr = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} + _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} ) type _Assembler struct { - jit.BaseAssembler - p _Program - x int - name string + jit.BaseAssembler + p _Program + x int + name string } func newAssembler(p _Program) *_Assembler { - return new(_Assembler).Init(p) + return new(_Assembler).Init(p) } /** Assembler Interface **/ func (self *_Assembler) Load() _Encoder { - return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) + return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) } func (self *_Assembler) Init(p _Program) *_Assembler { - self.p = p - self.BaseAssembler.Init(self.compile) - return self + self.p = p + self.BaseAssembler.Init(self.compile) + return self } func (self *_Assembler) compile() { - self.prologue() - self.instrs() - self.epilogue() - self.builtins() + self.prologue() + 
self.instrs() + self.epilogue() + self.builtins() } /** Assembler Stages **/ -var _OpFuncTab = [256]func(*_Assembler, *_Instr) { - _OP_null : (*_Assembler)._asm_OP_null, - _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr, - _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj, - _OP_bool : (*_Assembler)._asm_OP_bool, - _OP_i8 : (*_Assembler)._asm_OP_i8, - _OP_i16 : (*_Assembler)._asm_OP_i16, - _OP_i32 : (*_Assembler)._asm_OP_i32, - _OP_i64 : (*_Assembler)._asm_OP_i64, - _OP_u8 : (*_Assembler)._asm_OP_u8, - _OP_u16 : (*_Assembler)._asm_OP_u16, - _OP_u32 : (*_Assembler)._asm_OP_u32, - _OP_u64 : (*_Assembler)._asm_OP_u64, - _OP_f32 : (*_Assembler)._asm_OP_f32, - _OP_f64 : (*_Assembler)._asm_OP_f64, - _OP_str : (*_Assembler)._asm_OP_str, - _OP_bin : (*_Assembler)._asm_OP_bin, - _OP_quote : (*_Assembler)._asm_OP_quote, - _OP_number : (*_Assembler)._asm_OP_number, - _OP_eface : (*_Assembler)._asm_OP_eface, - _OP_iface : (*_Assembler)._asm_OP_iface, - _OP_byte : (*_Assembler)._asm_OP_byte, - _OP_text : (*_Assembler)._asm_OP_text, - _OP_deref : (*_Assembler)._asm_OP_deref, - _OP_index : (*_Assembler)._asm_OP_index, - _OP_load : (*_Assembler)._asm_OP_load, - _OP_save : (*_Assembler)._asm_OP_save, - _OP_drop : (*_Assembler)._asm_OP_drop, - _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, - _OP_recurse : (*_Assembler)._asm_OP_recurse, - _OP_is_nil : (*_Assembler)._asm_OP_is_nil, - _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1, - _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1, - _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2, - _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4, - _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8, - _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map, - _OP_goto : (*_Assembler)._asm_OP_goto, - _OP_map_iter : (*_Assembler)._asm_OP_map_iter, - _OP_map_stop : (*_Assembler)._asm_OP_map_stop, - _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key, - _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key, - _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next, - _OP_slice_len : (*_Assembler)._asm_OP_slice_len, - _OP_slice_next : (*_Assembler)._asm_OP_slice_next, - _OP_marshal : (*_Assembler)._asm_OP_marshal, - _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p, - _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text, - _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p, - _OP_cond_set : (*_Assembler)._asm_OP_cond_set, - _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc, +var _OpFuncTab = [256]func(*_Assembler, *_Instr){ + _OP_null: (*_Assembler)._asm_OP_null, + _OP_empty_arr: (*_Assembler)._asm_OP_empty_arr, + _OP_empty_obj: (*_Assembler)._asm_OP_empty_obj, + _OP_bool: (*_Assembler)._asm_OP_bool, + _OP_i8: (*_Assembler)._asm_OP_i8, + _OP_i16: (*_Assembler)._asm_OP_i16, + _OP_i32: (*_Assembler)._asm_OP_i32, + _OP_i64: (*_Assembler)._asm_OP_i64, + _OP_u8: (*_Assembler)._asm_OP_u8, + _OP_u16: (*_Assembler)._asm_OP_u16, + _OP_u32: (*_Assembler)._asm_OP_u32, + _OP_u64: (*_Assembler)._asm_OP_u64, + _OP_f32: (*_Assembler)._asm_OP_f32, + _OP_f64: (*_Assembler)._asm_OP_f64, + _OP_str: (*_Assembler)._asm_OP_str, + _OP_bin: (*_Assembler)._asm_OP_bin, + _OP_quote: (*_Assembler)._asm_OP_quote, + _OP_number: (*_Assembler)._asm_OP_number, + _OP_eface: (*_Assembler)._asm_OP_eface, + _OP_iface: (*_Assembler)._asm_OP_iface, + _OP_byte: (*_Assembler)._asm_OP_byte, + _OP_text: (*_Assembler)._asm_OP_text, + _OP_deref: (*_Assembler)._asm_OP_deref, + _OP_index: (*_Assembler)._asm_OP_index, + _OP_load: (*_Assembler)._asm_OP_load, + _OP_save: (*_Assembler)._asm_OP_save, + _OP_drop: 
(*_Assembler)._asm_OP_drop, + _OP_drop_2: (*_Assembler)._asm_OP_drop_2, + _OP_recurse: (*_Assembler)._asm_OP_recurse, + _OP_is_nil: (*_Assembler)._asm_OP_is_nil, + _OP_is_nil_p1: (*_Assembler)._asm_OP_is_nil_p1, + _OP_is_zero_1: (*_Assembler)._asm_OP_is_zero_1, + _OP_is_zero_2: (*_Assembler)._asm_OP_is_zero_2, + _OP_is_zero_4: (*_Assembler)._asm_OP_is_zero_4, + _OP_is_zero_8: (*_Assembler)._asm_OP_is_zero_8, + _OP_is_zero_map: (*_Assembler)._asm_OP_is_zero_map, + _OP_goto: (*_Assembler)._asm_OP_goto, + _OP_map_iter: (*_Assembler)._asm_OP_map_iter, + _OP_map_stop: (*_Assembler)._asm_OP_map_stop, + _OP_map_check_key: (*_Assembler)._asm_OP_map_check_key, + _OP_map_write_key: (*_Assembler)._asm_OP_map_write_key, + _OP_map_value_next: (*_Assembler)._asm_OP_map_value_next, + _OP_slice_len: (*_Assembler)._asm_OP_slice_len, + _OP_slice_next: (*_Assembler)._asm_OP_slice_next, + _OP_marshal: (*_Assembler)._asm_OP_marshal, + _OP_marshal_p: (*_Assembler)._asm_OP_marshal_p, + _OP_marshal_text: (*_Assembler)._asm_OP_marshal_text, + _OP_marshal_text_p: (*_Assembler)._asm_OP_marshal_text_p, + _OP_cond_set: (*_Assembler)._asm_OP_cond_set, + _OP_cond_testc: (*_Assembler)._asm_OP_cond_testc, } func (self *_Assembler) instr(v *_Instr) { - if fn := _OpFuncTab[v.op()]; fn != nil { - fn(self, v) - } else { - panic(fmt.Sprintf("invalid opcode: %d", v.op())) - } + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } } func (self *_Assembler) instrs() { - for i, v := range self.p { - self.Mark(i) - self.instr(&v) - self.debug_instr(i, &v) - } + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } } func (self *_Assembler) builtins() { - self.more_space() - self.error_too_deep() - self.error_invalid_number() - self.error_nan_or_infinite() - self.go_panic() + self.more_space() + self.error_too_deep() + self.error_invalid_number() + self.error_nan_or_infinite() + self.go_panic() } func (self *_Assembler) epilogue() { - self.Mark(len(self.p)) - self.Emit("XORL", _ET, _ET) - self.Emit("XORL", _EP, _EP) - self.Link(_LB_error) - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) - self.Emit("MOVQ", _ET, _RET_et) // MOVQ ET, et<>+24(FP) - self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+32(FP) - self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP - self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP - self.Emit("RET") // RET + self.Mark(len(self.p)) + self.Emit("XORL", _ET, _ET) + self.Emit("XORL", _EP, _EP) + self.Link(_LB_error) + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Emit("MOVQ", _ET, _RET_et) // MOVQ ET, et<>+24(FP) + self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+32(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET } func (self *_Assembler) prologue() { - self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP - self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) - self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP - self.load_buffer() // LOAD {buf} - self.Emit("MOVQ", _ARG_vp, _SP_p) // MOVQ vp<>+8(FP), SP.p - self.Emit("MOVQ", _ARG_sb, _ST) // MOVQ sb<>+16(FP), ST - self.Emit("XORL", _SP_x, _SP_x) // XORL SP.x, SP.x - self.Emit("XORL", _SP_f, _SP_f) // XORL SP.f, SP.f - 
self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.load_buffer() // LOAD {buf} + self.Emit("MOVQ", _ARG_vp, _SP_p) // MOVQ vp<>+8(FP), SP.p + self.Emit("MOVQ", _ARG_sb, _ST) // MOVQ sb<>+16(FP), ST + self.Emit("XORL", _SP_x, _SP_x) // XORL SP.x, SP.x + self.Emit("XORL", _SP_f, _SP_f) // XORL SP.f, SP.f + self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q } /** Assembler Inline Functions **/ func (self *_Assembler) xsave(reg ...obj.Addr) { - for i, v := range reg { - if i > _FP_saves / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) - } - } + for i, v := range reg { + if i > _FP_saves/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs+int64(i)*8)) + } + } } func (self *_Assembler) xload(reg ...obj.Addr) { - for i, v := range reg { - if i > _FP_saves / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) - } - } + for i, v := range reg { + if i > _FP_saves/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs+int64(i)*8), v) + } + } } func (self *_Assembler) rbuf_di() { - if _RP.Reg != x86.REG_DI { - panic("register allocation messed up: RP != DI") - } else { - self.Emit("ADDQ", _RL, _RP) - } + if _RP.Reg != x86.REG_DI { + panic("register allocation messed up: RP != DI") + } else { + self.Emit("ADDQ", _RL, _RP) + } } func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) { - self.check_size(nd) - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI - self.call_c(fn) // CALL_C $fn - self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL + self.check_size(nd) + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI + self.call_c(fn) // CALL_C $fn + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) store_str(s string) { - i := 0 - m := rt.Str2Mem(s) - - /* 8-byte stores */ - for i <= len(m) - 8 { - self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX - self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL) - i += 8 - } - - /* 4-byte stores */ - if i <= len(m) - 4 { - self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) - i += 4 - } - - /* 2-byte stores */ - if i <= len(m) - 2 { - self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) - i += 2 - } - - /* last byte */ - if i < len(m) { - self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) - } + i := 0 + m := rt.Str2Mem(s) + + /* 8-byte stores */ + for i <= len(m)-8 { + self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX + self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL) + i += 8 + } + + /* 4-byte stores */ + if i <= len(m)-4 { + self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) + i += 4 + } + + /* 2-byte stores */ + if i <= len(m)-2 { + self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) + i += 2 + } + 
+ /* last byte */ + if i < len(m) { + self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) + } } func (self *_Assembler) check_size(n int) { - self.check_size_rl(jit.Ptr(_RL, int64(n))) + self.check_size_rl(jit.Ptr(_RL, int64(n))) } func (self *_Assembler) check_size_r(r obj.Addr, d int) { - self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) + self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) } func (self *_Assembler) check_size_rl(v obj.Addr) { - idx := self.x - key := _LB_more_space_return + strconv.Itoa(idx) + idx := self.x + key := _LB_more_space_return + strconv.Itoa(idx) - /* the following code relies on LR == R9 to work */ - if _LR.Reg != x86.REG_R9 { - panic("register allocation messed up: LR != R9") - } + /* the following code relies on LR == R9 to work */ + if _LR.Reg != x86.REG_R9 { + panic("register allocation messed up: LR != R9") + } - /* check for buffer capacity */ - self.x++ - self.Emit("LEAQ", v, _AX) // LEAQ $v, AX - self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC - self.Sjmp("JBE" , key) // JBE _more_space_return_{n} - self.slice_grow_ax(key) // GROW $key - self.Link(key) // _more_space_return_{n}: + /* check for buffer capacity */ + self.x++ + self.Emit("LEAQ", v, _AX) // LEAQ $v, AX + self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC + self.Sjmp("JBE", key) // JBE _more_space_return_{n} + self.slice_grow_ax(key) // GROW $key + self.Link(key) // _more_space_return_{n}: } func (self *_Assembler) slice_grow_ax(ret string) { - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 - self.Sref(ret, 4) // .... &ret - self.Sjmp("JMP" , _LB_more_space) // JMP _more_space + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 + self.Sref(ret, 4) // .... &ret + self.Sjmp("JMP", _LB_more_space) // JMP _more_space } /** State Stack Helpers **/ const ( - _StateSize = int64(unsafe.Sizeof(_State{})) - _StackLimit = _MaxStack * _StateSize + _StateSize = int64(unsafe.Sizeof(_State{})) + _StackLimit = _MaxStack * _StateSize ) func (self *_Assembler) save_state() { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX - self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R8) // LEAQ _StateSize(CX), R8 - self.Emit("CMPQ", _R8, jit.Imm(_StackLimit)) // CMPQ R8, $_StackLimit - self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep - self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) - self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) - self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) - self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) - self.Emit("MOVQ", _R8, jit.Ptr(_ST, 0)) // MOVQ R8, (ST) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R8) // LEAQ _StateSize(CX), R8 + self.Emit("CMPQ", _R8, jit.Imm(_StackLimit)) // CMPQ R8, $_StackLimit + self.Sjmp("JAE", _LB_error_too_deep) // JA _error_too_deep + self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) + self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) + self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) + self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) + self.Emit("MOVQ", _R8, jit.Ptr(_ST, 0)) // MOVQ R8, (ST) } func (self *_Assembler) drop_state(decr int64) { - self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) - 
self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ", jit.Imm(decr), _AX) // SUBQ $decr, AX + self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) } /** Buffer Helpers **/ func (self *_Assembler) add_char(ch byte) { - self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) - self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL + self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL } func (self *_Assembler) add_long(ch uint32, n int64) { - self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) - self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL + self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL } func (self *_Assembler) add_text(ss string) { - self.store_str(ss) // TEXT $ss - self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL + self.store_str(ss) // TEXT $ss + self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL } func (self *_Assembler) prep_buffer() { - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) } func (self *_Assembler) prep_buffer_c() { - self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI - self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ RL, 8(DI) + self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI + self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ RL, 8(DI) } func (self *_Assembler) save_buffer() { - self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX - self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) - self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) - self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) } func (self *_Assembler) load_buffer() { - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP - self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL - 
self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC } /** Function Interface Helpers **/ func (self *_Assembler) call(pc obj.Addr) { - self.Emit("MOVQ", pc, _AX) // MOVQ $pc, AX - self.Rjmp("CALL", _AX) // CALL AX + self.Emit("MOVQ", pc, _AX) // MOVQ $pc, AX + self.Rjmp("CALL", _AX) // CALL AX } func (self *_Assembler) save_c() { - self.xsave(_REG_ffi...) // SAVE $REG_ffi + self.xsave(_REG_ffi...) // SAVE $REG_ffi } func (self *_Assembler) call_c(pc obj.Addr) { - self.call(pc) // CALL $pc - self.xload(_REG_ffi...) // LOAD $REG_ffi + self.call(pc) // CALL $pc + self.xload(_REG_ffi...) // LOAD $REG_ffi } func (self *_Assembler) call_go(pc obj.Addr) { - self.xsave(_REG_all...) // SAVE $REG_all - self.call(pc) // CALL $pc - self.xload(_REG_all...) // LOAD $REG_all + self.xsave(_REG_all...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_all...) // LOAD $REG_all } func (self *_Assembler) call_encoder(pc obj.Addr) { - self.xsave(_REG_enc...) // SAVE $REG_enc - self.call(pc) // CALL $pc - self.xload(_REG_enc...) // LOAD $REG_enc - self.load_buffer() // LOAD {buf} + self.xsave(_REG_enc...) // SAVE $REG_enc + self.call(pc) // CALL $pc + self.xload(_REG_enc...) // LOAD $REG_enc + self.load_buffer() // LOAD {buf} } func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) { - switch vt.Kind() { - case reflect.Interface : self.call_marshaler_i(fn, it) - case reflect.Ptr, reflect.Map: self.call_marshaler_v(fn, it, vt, true) - // struct/array of 1 direct iface type can be direct - default : self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect()) - } + switch vt.Kind() { + case reflect.Interface: + self.call_marshaler_i(fn, it) + case reflect.Ptr, reflect.Map: + self.call_marshaler_v(fn, it, vt, true) + // struct/array of 1 direct iface type can be direct + default: + self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect()) + } } func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) { - self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) - self.call_go(_F_assertI2I) // CALL_GO assertI2I - self.prep_buffer() // MOVE {buf}, (SP) - self.Emit("MOVOU", jit.Ptr(_SP, 24), _X0) // MOVOU 24(SP), X0 - self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) - self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX - self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) - self.call_encoder(fn) // CALL $fn - self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} - self.Link("_null_{n}") // _null_{n}: - self.check_size(4) // SIZE $4 - self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL - 
self.Link("_done_{n}") // _done_{n}: + self.Emit("MOVQ", jit.Gtype(it), _AX) // MOVQ $it, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) + self.call_go(_F_assertI2I) // CALL_GO assertI2I + self.prep_buffer() // MOVE {buf}, (SP) + self.Emit("MOVOU", jit.Ptr(_SP, 24), _X0) // MOVOU 24(SP), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) + self.call_encoder(fn) // CALL $fn + self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.Sjmp("JMP", "_done_{n}") // JMP _done_{n} + self.Link("_null_{n}") // _null_{n}: + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Link("_done_{n}") // _done_{n}: } func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) { - self.prep_buffer() // MOVE {buf}, (SP) - self.Emit("MOVQ", jit.Itab(it, vt), _AX) // MOVQ $(itab(it, vt)), AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - - /* dereference the pointer if needed */ - if !deref { - self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 16)) // MOVQ SP.p, 16(SP) - } else { - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) - } - - /* call the encoder, and perform error checks */ - self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX - self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) - self.call_encoder(fn) // CALL $fn - self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.prep_buffer() // MOVE {buf}, (SP) + self.Emit("MOVQ", jit.Itab(it, vt), _AX) // MOVQ $(itab(it, vt)), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + + /* dereference the pointer if needed */ + if !deref { + self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 16)) // MOVQ SP.p, 16(SP) + } else { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + } + + /* call the encoder, and perform error checks */ + self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) + self.call_encoder(fn) // CALL $fn + self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error } /** Builtin: _more_space **/ var ( - _T_byte = jit.Type(byteType) - _F_growslice = jit.Func(growslice) + _T_byte = jit.Type(byteType) + _F_growslice = jit.Func(growslice) ) func (self *_Assembler) more_space() { - self.Link(_LB_more_space) - self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0)) // MOVQ $_T_byte, (SP) - self.Emit("MOVQ", _RP, jit.Ptr(_SP, 8)) // 
MOVQ RP, 8(SP)
-    self.Emit("MOVQ", _RL, jit.Ptr(_SP, 16))       // MOVQ RL, 16(SP)
-    self.Emit("MOVQ", _RC, jit.Ptr(_SP, 24))       // MOVQ RC, 24(SP)
-    self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32))       // MOVQ AX, 32(SP)
-    self.xsave(_REG_jsr...)                        // SAVE $REG_jsr
-    self.call(_F_growslice)                        // CALL $pc
-    self.xload(_REG_jsr...)                        // LOAD $REG_jsr
-    self.Emit("MOVQ", jit.Ptr(_SP, 40), _RP)       // MOVQ 40(SP), RP
-    self.Emit("MOVQ", jit.Ptr(_SP, 48), _RL)       // MOVQ 48(SP), RL
-    self.Emit("MOVQ", jit.Ptr(_SP, 56), _RC)       // MOVQ 56(SP), RC
-    self.save_buffer()                             // SAVE {buf}
-    self.Rjmp("JMP" , _LR)                         // JMP LR
+	self.Link(_LB_more_space)
+	self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0))    // MOVQ $_T_byte, (SP)
+	self.Emit("MOVQ", _RP, jit.Ptr(_SP, 8))        // MOVQ RP, 8(SP)
+	self.Emit("MOVQ", _RL, jit.Ptr(_SP, 16))       // MOVQ RL, 16(SP)
+	self.Emit("MOVQ", _RC, jit.Ptr(_SP, 24))       // MOVQ RC, 24(SP)
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32))       // MOVQ AX, 32(SP)
+	self.xsave(_REG_jsr...)                        // SAVE $REG_jsr
+	self.call(_F_growslice)                        // CALL $pc
+	self.xload(_REG_jsr...)                        // LOAD $REG_jsr
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _RP)       // MOVQ 40(SP), RP
+	self.Emit("MOVQ", jit.Ptr(_SP, 48), _RL)       // MOVQ 48(SP), RL
+	self.Emit("MOVQ", jit.Ptr(_SP, 56), _RC)       // MOVQ 56(SP), RC
+	self.save_buffer()                             // SAVE {buf}
+	self.Rjmp("JMP", _LR)                          // JMP LR
 }
 
 /** Builtin Errors **/
 
 var (
-    _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep))))
-    _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite))))
-    _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType)
+	_V_ERR_too_deep               = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep))))
+	_V_ERR_nan_or_infinite        = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite))))
+	_I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType)
 )
 
 func (self *_Assembler) error_too_deep() {
-    self.Link(_LB_error_too_deep)
-    self.Emit("MOVQ", _V_ERR_too_deep, _EP)               // MOVQ $_V_ERR_too_deep, EP
-    self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
-    self.Sjmp("JMP" , _LB_error)                          // JMP _error
+	self.Link(_LB_error_too_deep)
+	self.Emit("MOVQ", _V_ERR_too_deep, _EP)               // MOVQ $_V_ERR_too_deep, EP
+	self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValueError, ET
+	self.Sjmp("JMP", _LB_error)                           // JMP _error
 }
 
 func (self *_Assembler) error_invalid_number() {
-    self.Link(_LB_error_invalid_number)
-    self.call_go(_F_error_number)            // CALL_GO error_number
-    self.Emit("MOVQ", jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET
-    self.Emit("MOVQ", jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP
-    self.Sjmp("JMP" , _LB_error)             // JMP _error
+	self.Link(_LB_error_invalid_number)
+	self.call_go(_F_error_number)            // CALL_GO error_number
+	self.Emit("MOVQ", jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP
+	self.Sjmp("JMP", _LB_error)              // JMP _error
 }
 
-func (self *_Assembler) error_nan_or_infinite() {
-    self.Link(_LB_error_nan_or_infinite)
-    self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP)        // MOVQ $_V_ERR_nan_or_infinite, EP
-    self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
-    self.Sjmp("JMP" , _LB_error)                          // JMP _error
+func (self *_Assembler) error_nan_or_infinite() {
+	self.Link(_LB_error_nan_or_infinite)
+	self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP)        // MOVQ $_V_ERR_nan_or_infinite, EP
+	self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValueError, ET
+	self.Sjmp("JMP", _LB_error)                           // JMP _error
 }
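Aside (not part of the patch): the `check_size`/`_more_space` machinery reformatted above implements a check-grow-retry protocol around `runtime.growslice`. A rough plain-Go sketch of the same idea, with invented names:

```go
// ensureSpace is an illustrative stand-in for the emitted code: test the
// remaining capacity first, and only take the slow growth path when the
// pending write would not fit.
func ensureSpace(buf []byte, need int) []byte {
	if len(buf)+need <= cap(buf) {
		return buf // fast path: the JBE to _more_space_return_{n}
	}
	// slow path: grow the backing array, preserving the current length
	// (the JIT saves/restores _REG_jsr around the growslice call)
	grown := make([]byte, len(buf), 2*(cap(buf)+need))
	copy(grown, buf)
	return grown
}
```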
 /** String Encoding Routine **/
 
 var (
-    _F_quote = jit.Imm(int64(native.S_quote))
-    _F_panic = jit.Func(goPanic)
+	_F_quote = jit.Imm(int64(native.S_quote))
+	_F_panic = jit.Func(goPanic)
 )
 
 func (self *_Assembler) go_panic() {
-    self.Link(_LB_panic)
-    self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 8))
-    self.call_go(_F_panic)
+	self.Link(_LB_panic)
+	self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 8))
+	self.call_go(_F_panic)
 }
 
 func (self *_Assembler) encode_string(doubleQuote bool) {
-    self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX)  // MOVQ 8(SP.p), AX
-    self.Emit("TESTQ", _AX, _AX)                // TESTQ AX, AX
-    self.Sjmp("JZ" , "_str_empty_{n}")          // JZ _str_empty_{n}
-    self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0))
-    self.Sjmp("JNE" , "_str_next_{n}")
-    self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0))
-    self.Sjmp("JMP", _LB_panic)
-    self.Link("_str_next_{n}")
-
-    /* openning quote, check for double quote */
-    if !doubleQuote {
-        self.check_size_r(_AX, 2) // SIZE $2
-        self.add_char('"')        // CHAR $'"'
-    } else {
-        self.check_size_r(_AX, 6)  // SIZE $6
-        self.add_long(_IM_open, 3) // TEXT $`"\"`
-    }
-
-    /* quoting loop */
-    self.Emit("XORL", _AX, _AX)     // XORL AX, AX
-    self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp
-    self.Link("_str_loop_{n}")      // _str_loop_{n}:
-    self.save_c()                   // SAVE $REG_ffi
-
-    /* load the output buffer first, and then input buffer,
-     * because the parameter registers collide with RP / RL / RC */
-    self.Emit("MOVQ", _RC, _CX)                     // MOVQ RC, CX
-    self.Emit("SUBQ", _RL, _CX)                     // SUBQ RL, CX
-    self.Emit("MOVQ", _CX, _VAR_dn)                 // MOVQ CX, dn
-    self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX
-    self.Emit("LEAQ", _VAR_dn, _CX)                 // LEAQ dn, CX
-    self.Emit("MOVQ", _VAR_sp, _AX)                 // MOVQ sp, AX
-    self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI)       // MOVQ (SP.p), DI
-    self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI)       // MOVQ 8(SP.p), SI
-    self.Emit("ADDQ", _AX, _DI)                     // ADDQ AX, DI
-    self.Emit("SUBQ", _AX, _SI)                     // SUBQ AX, SI
-
-    /* set the flags based on `doubleQuote` */
-    if !doubleQuote {
-        self.Emit("XORL", _R8, _R8) // XORL R8, R8
-    } else {
-        self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
-    }
-
-    /* call the native quoter */
-    self.call_c(_F_quote)              // CALL quote
-    self.Emit("ADDQ" , _VAR_dn, _RL)   // ADDQ dn, RL
-    self.Emit("TESTQ", _AX, _AX)       // TESTQ AX, AX
-    self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n}
-
-    /* close the string, check for double quote */
-    if !doubleQuote {
-        self.check_size(1)               // SIZE $1
-        self.add_char('"')               // CHAR $'"'
-        self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
-    } else {
-        self.check_size(3)               // SIZE $3
-        self.add_text("\\\"\"")          // TEXT $'\""'
-        self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
-    }
-
-    /* not enough space to contain the quoted string */
-    self.Link("_str_space_{n}")                     // _str_space_{n}:
-    self.Emit("NOTQ", _AX)                          // NOTQ AX
-    self.Emit("ADDQ", _AX, _VAR_sp)                 // ADDQ AX, sp
-    self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX
-    self.slice_grow_ax("_str_loop_{n}")             // GROW _str_loop_{n}
-
-    /* empty string, check for double quote */
-    if !doubleQuote {
-        self.Link("_str_empty_{n}") // _str_empty_{n}:
-        self.check_size(2)          // SIZE $2
-        self.add_text("\"\"")       // TEXT $'""'
-        self.Link("_str_end_{n}")   // _str_end_{n}:
-    } else {
-        self.Link("_str_empty_{n}")   // _str_empty_{n}:
-        self.check_size(6)            // SIZE $6
-        self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""'
-        self.Link("_str_end_{n}")     // _str_end_{n}:
-    }
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX)  // MOVQ 8(SP.p), AX
+	self.Emit("TESTQ", _AX, _AX)               // TESTQ AX, AX
+	self.Sjmp("JZ", "_str_empty_{n}")          // JZ _str_empty_{n}
+	self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0))
+	self.Sjmp("JNE", "_str_next_{n}")
+	self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0))
+	self.Sjmp("JMP", _LB_panic)
+	self.Link("_str_next_{n}")
+
+	/* opening quote, check for double quote */
+	if !doubleQuote {
+		self.check_size_r(_AX, 2) // SIZE $2
+		self.add_char('"')        // CHAR $'"'
+	} else {
+		self.check_size_r(_AX, 6)  // SIZE $6
+		self.add_long(_IM_open, 3) // TEXT $`"\"`
+	}
+
+	/* quoting loop */
+	self.Emit("XORL", _AX, _AX)     // XORL AX, AX
+	self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp
+	self.Link("_str_loop_{n}")      // _str_loop_{n}:
+	self.save_c()                   // SAVE $REG_ffi
+
+	/* load the output buffer first, and then input buffer,
+	 * because the parameter registers collide with RP / RL / RC */
+	self.Emit("MOVQ", _RC, _CX)                     // MOVQ RC, CX
+	self.Emit("SUBQ", _RL, _CX)                     // SUBQ RL, CX
+	self.Emit("MOVQ", _CX, _VAR_dn)                 // MOVQ CX, dn
+	self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX
+	self.Emit("LEAQ", _VAR_dn, _CX)                 // LEAQ dn, CX
+	self.Emit("MOVQ", _VAR_sp, _AX)                 // MOVQ sp, AX
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI)       // MOVQ (SP.p), DI
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI)       // MOVQ 8(SP.p), SI
+	self.Emit("ADDQ", _AX, _DI)                     // ADDQ AX, DI
+	self.Emit("SUBQ", _AX, _SI)                     // SUBQ AX, SI
+
+	/* set the flags based on `doubleQuote` */
+	if !doubleQuote {
+		self.Emit("XORL", _R8, _R8) // XORL R8, R8
+	} else {
+		self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
+	}
+
+	/* call the native quoter */
+	self.call_c(_F_quote)             // CALL quote
+	self.Emit("ADDQ", _VAR_dn, _RL)   // ADDQ dn, RL
+	self.Emit("TESTQ", _AX, _AX)      // TESTQ AX, AX
+	self.Sjmp("JS", "_str_space_{n}") // JS _str_space_{n}
+
+	/* close the string, check for double quote */
+	if !doubleQuote {
+		self.check_size(1)               // SIZE $1
+		self.add_char('"')               // CHAR $'"'
+		self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+	} else {
+		self.check_size(3)               // SIZE $3
+		self.add_text("\\\"\"")          // TEXT $'\""'
+		self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+	}
+
+	/* not enough space to contain the quoted string */
+	self.Link("_str_space_{n}")                     // _str_space_{n}:
+	self.Emit("NOTQ", _AX)                          // NOTQ AX
+	self.Emit("ADDQ", _AX, _VAR_sp)                 // ADDQ AX, sp
+	self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX
+	self.slice_grow_ax("_str_loop_{n}")             // GROW _str_loop_{n}
+
+	/* empty string, check for double quote */
+	if !doubleQuote {
+		self.Link("_str_empty_{n}") // _str_empty_{n}:
+		self.check_size(2)          // SIZE $2
+		self.add_text("\"\"")       // TEXT $'""'
+		self.Link("_str_end_{n}")   // _str_end_{n}:
+	} else {
+		self.Link("_str_empty_{n}")   // _str_empty_{n}:
+		self.check_size(6)            // SIZE $6
+		self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""'
+		self.Link("_str_end_{n}")     // _str_end_{n}:
+	}
 }
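Aside (not part of the patch): `encode_string` above has two output modes selected by `doubleQuote`. A loose Go equivalent using the standard library (behavioral sketch only; the real routine calls the native SIMD quoter in the grow-and-retry loop shown above):

```go
import "strconv"

// appendQuoted mimics the two encode_string modes: plain quoting, or a
// second quoting layer when doubleQuote is set, so the empty string
// encodes as `"\"\""` — matching the _str_empty_{n} branch above.
func appendQuoted(dst []byte, s string, doubleQuote bool) []byte {
	if !doubleQuote {
		return strconv.AppendQuote(dst, s)
	}
	return strconv.AppendQuote(dst, strconv.Quote(s))
}
```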
self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_str_empty_{n}") // JZ _str_empty_{n} + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) + self.Sjmp("JNE", "_str_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0)) + self.Sjmp("JMP", _LB_panic) + self.Link("_str_next_{n}") + + /* openning quote, check for double quote */ + if !doubleQuote { + self.check_size_r(_AX, 2) // SIZE $2 + self.add_char('"') // CHAR $'"' + } else { + self.check_size_r(_AX, 6) // SIZE $6 + self.add_long(_IM_open, 3) // TEXT $`"\"` + } + + /* quoting loop */ + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp + self.Link("_str_loop_{n}") // _str_loop_{n}: + self.save_c() // SAVE $REG_ffi + + /* load the output buffer first, and then input buffer, + * because the parameter registers collide with RP / RL / RC */ + self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX + self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX + self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn + self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX + self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX + self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI + self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI + self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI + + /* set the flags based on `doubleQuote` */ + if !doubleQuote { + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + } else { + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + } + + /* call the native quoter */ + self.call_c(_F_quote) // CALL quote + self.Emit("ADDQ", _VAR_dn, _RL) // ADDQ dn, RL + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_str_space_{n}") // JS _str_space_{n} + + /* close the string, check for double quote */ + if !doubleQuote { + self.check_size(1) // SIZE $1 + self.add_char('"') // CHAR $'"' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } else { + self.check_size(3) // SIZE $3 + self.add_text("\\\"\"") // TEXT $'\""' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } + + /* not enough space to contain the quoted string */ + self.Link("_str_space_{n}") // _str_space_{n}: + self.Emit("NOTQ", _AX) // NOTQ AX + self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp + self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX + self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n} + + /* empty string, check for double quote */ + if !doubleQuote { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(2) // SIZE $2 + self.add_text("\"\"") // TEXT $'""' + self.Link("_str_end_{n}") // _str_end_{n}: + } else { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(6) // SIZE $6 + self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""' + self.Link("_str_end_{n}") // _str_end_{n}: + } } /** OpCode Assembler Functions **/ var ( - _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) - _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) + _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) + _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) ) var ( - _F_f64toa = jit.Imm(int64(native.S_f64toa)) - _F_f32toa = jit.Imm(int64(native.S_f32toa)) - _F_i64toa = jit.Imm(int64(native.S_i64toa)) - _F_u64toa = jit.Imm(int64(native.S_u64toa)) - _F_b64encode = jit.Imm(int64(_subr__b64encode)) + 
_F_f64toa = jit.Imm(int64(native.S_f64toa)) + _F_f32toa = jit.Imm(int64(native.S_f32toa)) + _F_i64toa = jit.Imm(int64(native.S_i64toa)) + _F_u64toa = jit.Imm(int64(native.S_u64toa)) + _F_b64encode = jit.Imm(int64(_subr__b64encode)) ) var ( - _F_memmove = jit.Func(memmove) - _F_error_number = jit.Func(error_number) - _F_isValidNumber = jit.Func(isValidNumber) + _F_memmove = jit.Func(memmove) + _F_error_number = jit.Func(error_number) + _F_isValidNumber = jit.Func(isValidNumber) ) var ( - _F_iteratorStop = jit.Func(iteratorStop) - _F_iteratorNext = jit.Func(iteratorNext) - _F_iteratorStart = jit.Func(iteratorStart) + _F_iteratorStop = jit.Func(iteratorStop) + _F_iteratorNext = jit.Func(iteratorNext) + _F_iteratorStart = jit.Func(iteratorStart) ) var ( - _F_encodeTypedPointer obj.Addr - _F_encodeJsonMarshaler obj.Addr - _F_encodeTextMarshaler obj.Addr + _F_encodeTypedPointer obj.Addr + _F_encodeJsonMarshaler obj.Addr + _F_encodeTextMarshaler obj.Addr ) const ( - _MODE_AVX2 = 1 << 2 + _MODE_AVX2 = 1 << 2 ) func init() { - _F_encodeTypedPointer = jit.Func(encodeTypedPointer) - _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) - _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) + _F_encodeTypedPointer = jit.Func(encodeTypedPointer) + _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) + _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) } func (self *_Assembler) _asm_OP_null(_ *_Instr) { - self.check_size(4) - self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.check_size(4) + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL } func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) { - self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) - self.Sjmp("JC", "_empty_arr_{n}") - self._asm_OP_null(nil) - self.Sjmp("JMP", "_empty_arr_end_{n}") - self.Link("_empty_arr_{n}") - self.check_size(2) - self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) - self.Emit("ADDQ", jit.Imm(2), _RL) - self.Link("_empty_arr_end_{n}") + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_arr_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_arr_end_{n}") + self.Link("_empty_arr_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_arr_end_{n}") } func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) { - self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) - self.Sjmp("JC", "_empty_obj_{n}") - self._asm_OP_null(nil) - self.Sjmp("JMP", "_empty_obj_end_{n}") - self.Link("_empty_obj_{n}") - self.check_size(2) - self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) - self.Emit("ADDQ", jit.Imm(2), _RL) - self.Link("_empty_obj_end_{n}") + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_obj_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_obj_end_{n}") + self.Link("_empty_obj_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_obj_end_{n}") } func (self *_Assembler) _asm_OP_bool(_ *_Instr) { - self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 - self.Sjmp("JE" , "_false_{n}") // JE _false_{n} - self.check_size(4) // SIZE $4 - self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // 
MOVL $'true', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL - self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} - self.Link("_false_{n}") // _false_{n}: - self.check_size(5) // SIZE $5 - self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) - self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) - self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL - self.Link("_end_{n}") // _end_{n}: + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Sjmp("JE", "_false_{n}") // JE _false_{n} + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Sjmp("JMP", "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.check_size(5) // SIZE $5 + self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) + self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) + self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL + self.Link("_end_{n}") // _end_{n}: } func (self *_Assembler) _asm_OP_i8(_ *_Instr) { - self.store_int(4, _F_i64toa, "MOVBQSX") + self.store_int(4, _F_i64toa, "MOVBQSX") } func (self *_Assembler) _asm_OP_i16(_ *_Instr) { - self.store_int(6, _F_i64toa, "MOVWQSX") + self.store_int(6, _F_i64toa, "MOVWQSX") } func (self *_Assembler) _asm_OP_i32(_ *_Instr) { - self.store_int(17, _F_i64toa, "MOVLQSX") + self.store_int(17, _F_i64toa, "MOVLQSX") } func (self *_Assembler) _asm_OP_i64(_ *_Instr) { - self.store_int(21, _F_i64toa, "MOVQ") + self.store_int(21, _F_i64toa, "MOVQ") } func (self *_Assembler) _asm_OP_u8(_ *_Instr) { - self.store_int(3, _F_u64toa, "MOVBQZX") + self.store_int(3, _F_u64toa, "MOVBQZX") } func (self *_Assembler) _asm_OP_u16(_ *_Instr) { - self.store_int(5, _F_u64toa, "MOVWQZX") + self.store_int(5, _F_u64toa, "MOVWQZX") } func (self *_Assembler) _asm_OP_u32(_ *_Instr) { - self.store_int(16, _F_u64toa, "MOVLQZX") + self.store_int(16, _F_u64toa, "MOVLQZX") } func (self *_Assembler) _asm_OP_u64(_ *_Instr) { - self.store_int(20, _F_u64toa, "MOVQ") + self.store_int(20, _F_u64toa, "MOVQ") } func (self *_Assembler) _asm_OP_f32(_ *_Instr) { - self.check_size(32) - self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX - self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX - self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX - self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 - self.call_c(_F_f32toa) // CALL_C f64toa - self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL + self.check_size(32) + self.Emit("MOVL", jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX + self.Emit("ANDL", jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX + self.Emit("XORL", jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX + self.Sjmp("JZ", _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSS", jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 + self.call_c(_F_f32toa) // CALL_C f64toa + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) _asm_OP_f64(_ *_Instr) { - self.check_size(32) - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX - self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX - 
self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX - self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 - self.call_c(_F_f64toa) // CALL_C f64toa - self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL + self.check_size(32) + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX + self.Emit("ANDQ", _CX, _AX) // ANDQ CX, AX + self.Emit("XORQ", _CX, _AX) // XORQ CX, AX + self.Sjmp("JZ", _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSD", jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 + self.call_c(_F_f64toa) // CALL_C f64toa + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) _asm_OP_str(_ *_Instr) { - self.encode_string(false) + self.encode_string(false) } func (self *_Assembler) _asm_OP_bin(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX - self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX - self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX - self.Emit("MOVQ", _DX, _R8) // MOVQ DX, R8 - self.From("MULQ", _CX) // MULQ CX - self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX - self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX - self.Emit("MOVQ", _R8, _DX) // MOVQ R8, DX - self.check_size_r(_AX, 0) // SIZE AX - self.add_char('"') // CHAR $'"' - self.save_c() // SAVE $REG_ffi - self.prep_buffer_c() // MOVE {buf}, DI - self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI - - /* check for AVX2 support */ - if !cpu.HasAVX2 { - self.Emit("XORL", _DX, _DX) // XORL DX, DX - } else { - self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX - } - - /* call the encoder */ - self.call_c(_F_b64encode) // CALL b64encode - self.load_buffer() // LOAD {buf} - self.add_char('"') // CHAR $'"' + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX + self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX + self.Emit("MOVQ", _DX, _R8) // MOVQ DX, R8 + self.From("MULQ", _CX) // MULQ CX + self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX + self.Emit("ORQ", jit.Imm(2), _AX) // ORQ $2, AX + self.Emit("MOVQ", _R8, _DX) // MOVQ R8, DX + self.check_size_r(_AX, 0) // SIZE AX + self.add_char('"') // CHAR $'"' + self.save_c() // SAVE $REG_ffi + self.prep_buffer_c() // MOVE {buf}, DI + self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI + + /* check for AVX2 support */ + if !cpu.HasAVX2 { + self.Emit("XORL", _DX, _DX) // XORL DX, DX + } else { + self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX + } + + /* call the encoder */ + self.call_c(_F_b64encode) // CALL b64encode + self.load_buffer() // LOAD {buf} + self.add_char('"') // CHAR $'"' } func (self *_Assembler) _asm_OP_quote(_ *_Instr) { - self.encode_string(true) + self.encode_string(true) } func (self *_Assembler) _asm_OP_number(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ (SP.p), CX - self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX - self.Sjmp("JZ" , "_empty_{n}") // JZ _empty_{n} - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_number_next_{n}") - self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0)) - self.Sjmp("JMP", _LB_panic) - self.Link("_number_next_{n}") - 
self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.call_go(_F_isValidNumber) // CALL_GO isValidNumber - self.Emit("CMPB" , jit.Ptr(_SP, 16), jit.Imm(0)) // CMPB 16(SP), $0 - self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX - self.check_size_r(_AX, 0) // SIZE AX - self.Emit("LEAQ" , jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX - self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVOU", jit.Ptr(_SP_p, 0), _X0) // MOVOU (SP.p), X0 - self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) - self.call_go(_F_memmove) // CALL_GO memmove - self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} - self.Link("_empty_{n}") // _empty_{n}: - self.check_size(1) // SIZE $1 - self.add_char('0') // CHAR $'0' - self.Link("_done_{n}") // _done_{n}: + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVQ (SP.p), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ", "_empty_{n}") // JZ _empty_{n} + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_number_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0)) + self.Sjmp("JMP", _LB_panic) + self.Link("_number_next_{n}") + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.call_go(_F_isValidNumber) // CALL_GO isValidNumber + self.Emit("CMPB", jit.Ptr(_SP, 16), jit.Imm(0)) // CMPB 16(SP), $0 + self.Sjmp("JE", _LB_error_invalid_number) // JE _error_invalid_number + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.check_size_r(_AX, 0) // SIZE AX + self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX + self.Emit("ADDQ", jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", jit.Ptr(_SP_p, 0), _X0) // MOVOU (SP.p), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.call_go(_F_memmove) // CALL_GO memmove + self.Sjmp("JMP", "_done_{n}") // JMP _done_{n} + self.Link("_empty_{n}") // _empty_{n}: + self.check_size(1) // SIZE $1 + self.add_char('0') // CHAR $'0' + self.Link("_done_{n}") // _done_{n}: } func (self *_Assembler) _asm_OP_eface(_ *_Instr) { - self.prep_buffer() // MOVE {buf}, (SP)s - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) - self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) - self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) - self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET - self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.prep_buffer() // MOVE {buf}, (SP)s + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("LEAQ", jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ", 
 
 func (self *_Assembler) _asm_OP_eface(_ *_Instr) {
-    self.prep_buffer()                         // MOVE {buf}, (SP)s
-    self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8))   // MOVQ AX, 8(SP)
-    self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16))  // MOVQ AX, 16(SP)
-    self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24))  // MOVQ ST, 24(SP)
-    self.Emit("MOVQ" , _ARG_fv, _AX)           // MOVQ fv, AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32))  // MOVQ AX, 32(SP)
-    self.call_encoder(_F_encodeTypedPointer)   // CALL encodeTypedPointer
-    self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET)  // MOVQ 40(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP)  // MOVQ 48(SP), EP
-    self.Emit("TESTQ", _ET, _ET)               // TESTQ ET, ET
-    self.Sjmp("JNZ" , _LB_error)               // JNZ _error
+	self.prep_buffer()                        // MOVE {buf}, (SP)
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8))   // MOVQ AX, 8(SP)
+	self.Emit("LEAQ", jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))  // MOVQ AX, 16(SP)
+	self.Emit("MOVQ", _ST, jit.Ptr(_SP, 24))  // MOVQ ST, 24(SP)
+	self.Emit("MOVQ", _ARG_fv, _AX)           // MOVQ fv, AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32))  // MOVQ AX, 32(SP)
+	self.call_encoder(_F_encodeTypedPointer)  // CALL encodeTypedPointer
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _ET)  // MOVQ 40(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 48), _EP)  // MOVQ 48(SP), EP
+	self.Emit("TESTQ", _ET, _ET)              // TESTQ ET, ET
+	self.Sjmp("JNZ", _LB_error)               // JNZ _error
 }
 
 func (self *_Assembler) _asm_OP_iface(_ *_Instr) {
-    self.prep_buffer()                         // MOVE {buf}, (SP)
-    self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
-    self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX)   // MOVQ 8(AX), AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8))   // MOVQ AX, 8(SP)
-    self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16))  // MOVQ AX, 16(SP)
-    self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24))  // MOVQ ST, 24(SP)
-    self.Emit("MOVQ" , _ARG_fv, _AX)           // MOVQ fv, AX
-    self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32))  // MOVQ AX, 32(SP)
-    self.call_encoder(_F_encodeTypedPointer)   // CALL encodeTypedPointer
-    self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET)  // MOVQ 40(SP), ET
-    self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP)  // MOVQ 48(SP), EP
-    self.Emit("TESTQ", _ET, _ET)               // TESTQ ET, ET
-    self.Sjmp("JNZ" , _LB_error)               // JNZ _error
+	self.prep_buffer()                        // MOVE {buf}, (SP)
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+	self.Emit("MOVQ", jit.Ptr(_AX, 8), _AX)   // MOVQ 8(AX), AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8))   // MOVQ AX, 8(SP)
+	self.Emit("LEAQ", jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))  // MOVQ AX, 16(SP)
+	self.Emit("MOVQ", _ST, jit.Ptr(_SP, 24))  // MOVQ ST, 24(SP)
+	self.Emit("MOVQ", _ARG_fv, _AX)           // MOVQ fv, AX
+	self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32))  // MOVQ AX, 32(SP)
+	self.call_encoder(_F_encodeTypedPointer)  // CALL encodeTypedPointer
+	self.Emit("MOVQ", jit.Ptr(_SP, 40), _ET)  // MOVQ 40(SP), ET
+	self.Emit("MOVQ", jit.Ptr(_SP, 48), _EP)  // MOVQ 48(SP), EP
+	self.Emit("TESTQ", _ET, _ET)              // TESTQ ET, ET
+	self.Sjmp("JNZ", _LB_error)               // JNZ _error
 }
 
 func (self *_Assembler) _asm_OP_byte(p *_Instr) {
-    self.check_size(1)
-    self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1)
-    self.Emit("ADDQ", jit.Imm(1), _RL)                           // ADDQ $1, RL
+	self.check_size(1)
+	self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVB $p.i64(), (RP)(RL*1)
+	self.Emit("ADDQ", jit.Imm(1), _RL)                           // ADDQ $1, RL
 }
 
 func (self *_Assembler) _asm_OP_text(p *_Instr) {
-    self.check_size(len(p.vs())) // SIZE ${len(p.vs())}
-    self.add_text(p.vs())        // TEXT ${p.vs()}
+	self.check_size(len(p.vs())) // SIZE ${len(p.vs())}
+	self.add_text(p.vs())        // TEXT ${p.vs()}
 }
 
 func (self *_Assembler) _asm_OP_deref(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
 }
 
 func (self *_Assembler) _asm_OP_index(p *_Instr) {
-    self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX
-    self.Emit("ADDQ", _AX, _SP_p)            // ADDQ AX, SP.p
+	self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX
+	self.Emit("ADDQ", _AX, _SP_p)            // ADDQ AX, SP.p
 }
 
 func (self *_Assembler) _asm_OP_load(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX)             // MOVQ (ST), AX
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p)  // MOVQ -8(ST)(AX), SP.p
-    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q)   // MOVQ (ST)(AX), SP.q
+
self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q } func (self *_Assembler) _asm_OP_save(_ *_Instr) { - self.save_state() + self.save_state() } func (self *_Assembler) _asm_OP_drop(_ *_Instr) { - self.drop_state(_StateSize) + self.drop_state(_StateSize) } func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { - self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) + self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) } func (self *_Assembler) _asm_OP_recurse(p *_Instr) { - self.prep_buffer() // MOVE {buf}, (SP) - vt, pv := p.vp() - self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ $(type(p.vt())), AX - self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) - - /* check for indirection */ - if !rt.UnpackType(vt).Indirect() { - self.Emit("MOVQ", _SP_p, _AX) // MOVQ SP.p, AX - } else { - self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, 48(SP) - self.Emit("LEAQ", _VAR_vp, _AX) // LEAQ 48(SP), AX - } - - /* call the encoder */ - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) - self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) - self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX - if pv { - self.Emit("BTCQ", jit.Imm(bitPointerValue), _AX) // BTCQ $1, AX - } - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) - self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET - self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.prep_buffer() // MOVE {buf}, (SP) + vt, pv := p.vp() + self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ $(type(p.vt())), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + + /* check for indirection */ + if !rt.UnpackType(vt).Indirect() { + self.Emit("MOVQ", _SP_p, _AX) // MOVQ SP.p, AX + } else { + self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, 48(SP) + self.Emit("LEAQ", _VAR_vp, _AX) // LEAQ 48(SP), AX + } + + /* call the encoder */ + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ", _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) + self.Emit("MOVQ", _ARG_fv, _AX) // MOVQ fv, AX + if pv { + self.Emit("BTCQ", jit.Imm(bitPointerValue), _AX) // BTCQ $1, AX + } + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("MOVQ", jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error } func (self *_Assembler) _asm_OP_is_nil(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func 
(self *_Assembler) _asm_OP_is_zero_1(p *_Instr) { - self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) { - self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) { - self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_map(p *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Xjmp("JZ" , p.vi()) // JZ p.vi() - self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Xjmp("JZ", p.vi()) // JZ p.vi() + self.Emit("CMPQ", jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_goto(p *_Instr) { - self.Xjmp("JMP", p.vi()) + self.Xjmp("JMP", p.vi()) } func (self *_Assembler) _asm_OP_map_iter(p *_Instr) { - self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) - self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) - self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) - self.call_go(_F_iteratorStart) // CALL_GO iteratorStart - self.Emit("MOVQ" , jit.Ptr(_SP, 24), _SP_q) // MOVQ 24(SP), SP.q - self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET - self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ", _ARG_fv, _AX) // MOVQ fv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.call_go(_F_iteratorStart) // CALL_GO iteratorStart + self.Emit("MOVQ", jit.Ptr(_SP, 24), _SP_q) // MOVQ 24(SP), SP.q + self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error } func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) { - self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, 0(SP) - self.call_go(_F_iteratorStop) // CALL_GO iteratorStop - self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q + self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, 0(SP) + self.call_go(_F_iteratorStop) 
// CALL_GO iteratorStop
+	self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q
 }
 
 func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) {
-    self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p
-    self.Emit("TESTQ", _SP_p, _SP_p)             // TESTQ SP.p, SP.p
-    self.Xjmp("JZ" , p.vi())                     // JNZ p.vi()
+	self.Emit("MOVQ", jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p
+	self.Emit("TESTQ", _SP_p, _SP_p)            // TESTQ SP.p, SP.p
+	self.Xjmp("JZ", p.vi())                     // JZ p.vi()
 }
 
 func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) {
-    self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv
-    self.Sjmp("JNC", "_unordered_key_{n}")             // JNC _unordered_key_{n}
-    self.encode_string(false)                          // STR $false
-    self.Xjmp("JMP", p.vi())                           // JMP ${p.vi()}
-    self.Link("_unordered_key_{n}")                    // _unordered_key_{n}:
+	self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv
+	self.Sjmp("JNC", "_unordered_key_{n}")             // JNC _unordered_key_{n}
+	self.encode_string(false)                          // STR $false
+	self.Xjmp("JMP", p.vi())                           // JMP ${p.vi()}
+	self.Link("_unordered_key_{n}")                    // _unordered_key_{n}:
 }
 
 func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) {
-    self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p
-    self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0))   // MOVQ SP.q, (SP)
-    self.call_go(_F_iteratorNext)               // CALL_GO iteratorNext
+	self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p
+	self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0))   // MOVQ SP.q, (SP)
+	self.call_go(_F_iteratorNext)               // CALL_GO iteratorNext
 }
 
 func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) {
-    self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x)     // MOVQ 8(SP.p), SP.x
-    self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p)     // MOVQ (SP.p), SP.p
-    self.Emit("ORQ" , jit.Imm(1 << _S_init), _SP_f)  // ORQ $(1<<_S_init), SP.f
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SP_x)  // MOVQ 8(SP.p), SP.x
+	self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p)  // MOVQ (SP.p), SP.p
+	self.Emit("ORQ", jit.Imm(1<<_S_init), _SP_f) // ORQ $(1<<_S_init), SP.f
 }
 
 func (self *_Assembler) _asm_OP_slice_next(p *_Instr) {
-    self.Emit("TESTQ" , _SP_x, _SP_x)                         // TESTQ SP.x, SP.x
-    self.Xjmp("JZ" , p.vi())                                  // JZ p.vi()
-    self.Emit("SUBQ" , jit.Imm(1), _SP_x)                     // SUBQ $1, SP.x
-    self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f)               // BTRQ $_S_init, SP.f
-    self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX)  // LEAQ $(p.vlen())(SP.p), AX
-    self.Emit("CMOVQCC", _AX, _SP_p)                          // CMOVQNC AX, SP.p
+	self.Emit("TESTQ", _SP_x, _SP_x)                        // TESTQ SP.x, SP.x
+	self.Xjmp("JZ", p.vi())                                 // JZ p.vi()
+	self.Emit("SUBQ", jit.Imm(1), _SP_x)                    // SUBQ $1, SP.x
+	self.Emit("BTRQ", jit.Imm(_S_init), _SP_f)              // BTRQ $_S_init, SP.f
+	self.Emit("LEAQ", jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX
+	self.Emit("CMOVQCC", _AX, _SP_p)                        // CMOVQNC AX, SP.p
 }
 
 func (self *_Assembler) _asm_OP_marshal(p *_Instr) {
-    self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt())
+	self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt())
 }
 
 func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) {
-    if p.vk() != reflect.Ptr {
-        panic("marshal_p: invalid type")
-    } else {
-        self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false)
-    }
+	if p.vk() != reflect.Ptr {
+		panic("marshal_p: invalid type")
+	} else {
+		self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false)
+	}
}
 
 func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) {
-    self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt())
+
self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt()) } func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) { - if p.vk() != reflect.Ptr { - panic("marshal_text_p: invalid type") - } else { - self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false) - } + if p.vk() != reflect.Ptr { + panic("marshal_text_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false) + } } func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) { - self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f + self.Emit("ORQ", jit.Imm(1<<_S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f } func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) { - self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f - self.Xjmp("JC" , p.vi()) + self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f + self.Xjmp("JC", p.vi()) } func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { - self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16))// MOVQ $(p2.op()), 16(SP) - self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP) - self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP) - self.call_go(_F_println) + self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16)) // MOVQ $(p2.op()), 16(SP) + self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP) + self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP) + self.call_go(_F_println) } var ( - _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier)) + _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier)) - _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) ) func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) { - if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { - panic("rec contains AX!") - } - self.Emit("MOVQ", _V_writeBarrier, _R10) - self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, _AX) - self.xsave(_DI) - self.Emit("LEAQ", rec, _DI) - self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX - self.Rjmp("CALL", _R10) - self.xload(_DI) - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") -} \ No newline at end of file + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Emit("MOVQ", ptr, _AX) + self.xsave(_DI) + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + self.xload(_DI) + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go117.go index 1f1b28073..e51da42d7 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go117.go +++ 
b/vendor/github.com/bytedance/sonic/internal/encoder/assembler_amd64_go117.go @@ -20,19 +20,19 @@ package encoder import ( - `fmt` - `reflect` - `strconv` - `unsafe` - - `github.com/bytedance/sonic/internal/cpu` - `github.com/bytedance/sonic/internal/jit` - `github.com/bytedance/sonic/internal/native/types` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/obj/x86` - - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/rt` + "fmt" + "reflect" + "strconv" + "unsafe" + + "github.com/bytedance/sonic/internal/cpu" + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native/types" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" + + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/rt" ) /** Register Allocations @@ -67,1136 +67,1139 @@ import ( */ const ( - _S_cond = iota - _S_init + _S_cond = iota + _S_init ) const ( - _FP_args = 32 // 32 bytes for spill registers of arguments - _FP_fargs = 40 // 40 bytes for passing arguments to other Go functions - _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions - _FP_locals = 24 // 24 bytes for local variables + _FP_args = 32 // 32 bytes for spill registers of arguments + _FP_fargs = 40 // 40 bytes for passing arguments to other Go functions + _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions + _FP_locals = 24 // 24 bytes for local variables ) const ( - _FP_loffs = _FP_fargs + _FP_saves - _FP_offs = _FP_loffs + _FP_locals - // _FP_offs = _FP_loffs + _FP_locals + _FP_debug - _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer - _FP_base = _FP_size + 8 // 8 bytes for the return address + _FP_loffs = _FP_fargs + _FP_saves + _FP_offs = _FP_loffs + _FP_locals + // _FP_offs = _FP_loffs + _FP_locals + _FP_debug + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address ) const ( - _FM_exp32 = 0x7f800000 - _FM_exp64 = 0x7ff0000000000000 + _FM_exp32 = 0x7f800000 + _FM_exp64 = 0x7ff0000000000000 ) const ( - _IM_null = 0x6c6c756e // 'null' - _IM_true = 0x65757274 // 'true' - _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') - _IM_open = 0x00225c22 // '"\"∅' - _IM_array = 0x5d5b // '[]' - _IM_object = 0x7d7b // '{}' - _IM_mulv = -0x5555555555555555 + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') + _IM_open = 0x00225c22 // '"\"∅' + _IM_array = 0x5d5b // '[]' + _IM_object = 0x7d7b // '{}' + _IM_mulv = -0x5555555555555555 ) const ( - _LB_more_space = "_more_space" - _LB_more_space_return = "_more_space_return_" + _LB_more_space = "_more_space" + _LB_more_space_return = "_more_space_return_" ) const ( - _LB_error = "_error" - _LB_error_too_deep = "_error_too_deep" - _LB_error_invalid_number = "_error_invalid_number" - _LB_error_nan_or_infinite = "_error_nan_or_infinite" - _LB_panic = "_panic" + _LB_error = "_error" + _LB_error_too_deep = "_error_too_deep" + _LB_error_invalid_number = "_error_invalid_number" + _LB_error_nan_or_infinite = "_error_nan_or_infinite" + _LB_panic = "_panic" ) var ( - _AX = jit.Reg("AX") - _BX = jit.Reg("BX") - _CX = jit.Reg("CX") - _DX = jit.Reg("DX") - _DI = jit.Reg("DI") - _SI = jit.Reg("SI") - _BP = jit.Reg("BP") - _SP = jit.Reg("SP") - _R8 = jit.Reg("R8") - _R9 = jit.Reg("R9") + _AX = jit.Reg("AX") + _BX = jit.Reg("BX") + _CX = jit.Reg("CX") + 
_DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") + _R9 = jit.Reg("R9") ) var ( - _X0 = jit.Reg("X0") - _Y0 = jit.Reg("Y0") + _X0 = jit.Reg("X0") + _Y0 = jit.Reg("Y0") ) var ( - _ST = jit.Reg("R15") // can't use R14 since it's always scratched by Go... - _RP = jit.Reg("DI") - _RL = jit.Reg("SI") - _RC = jit.Reg("DX") + _ST = jit.Reg("R15") // can't use R14 since it's always scratched by Go... + _RP = jit.Reg("DI") + _RL = jit.Reg("SI") + _RC = jit.Reg("DX") ) var ( - _LR = jit.Reg("R9") - _ET = jit.Reg("AX") - _EP = jit.Reg("BX") + _LR = jit.Reg("R9") + _ET = jit.Reg("AX") + _EP = jit.Reg("BX") ) var ( - _SP_p = jit.Reg("R10") // saved on BX when call_c - _SP_q = jit.Reg("R11") // saved on BP when call_c - _SP_x = jit.Reg("R12") - _SP_f = jit.Reg("R13") + _SP_p = jit.Reg("R10") // saved on BX when call_c + _SP_q = jit.Reg("R11") // saved on BP when call_c + _SP_x = jit.Reg("R12") + _SP_f = jit.Reg("R13") ) var ( - _ARG_rb = jit.Ptr(_SP, _FP_base) - _ARG_vp = jit.Ptr(_SP, _FP_base + 8) - _ARG_sb = jit.Ptr(_SP, _FP_base + 16) - _ARG_fv = jit.Ptr(_SP, _FP_base + 24) + _ARG_rb = jit.Ptr(_SP, _FP_base) + _ARG_vp = jit.Ptr(_SP, _FP_base+8) + _ARG_sb = jit.Ptr(_SP, _FP_base+16) + _ARG_fv = jit.Ptr(_SP, _FP_base+24) ) var ( - _RET_et = _ET - _RET_ep = _EP + _RET_et = _ET + _RET_ep = _EP ) var ( - _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves) - _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) - _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) + _VAR_sp = jit.Ptr(_SP, _FP_fargs+_FP_saves) + _VAR_dn = jit.Ptr(_SP, _FP_fargs+_FP_saves+8) + _VAR_vp = jit.Ptr(_SP, _FP_fargs+_FP_saves+16) ) var ( - _REG_ffi = []obj.Addr{ _RP, _RL, _RC} - _REG_b64 = []obj.Addr{_SP_p, _SP_q} + _REG_ffi = []obj.Addr{_RP, _RL, _RC} + _REG_b64 = []obj.Addr{_SP_p, _SP_q} - _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} - _REG_ms = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} - _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} + _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} + _REG_ms = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} + _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} ) type _Assembler struct { - jit.BaseAssembler - p _Program - x int - name string + jit.BaseAssembler + p _Program + x int + name string } func newAssembler(p _Program) *_Assembler { - return new(_Assembler).Init(p) + return new(_Assembler).Init(p) } /** Assembler Interface **/ func (self *_Assembler) Load() _Encoder { - return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) + return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) } func (self *_Assembler) Init(p _Program) *_Assembler { - self.p = p - self.BaseAssembler.Init(self.compile) - return self + self.p = p + self.BaseAssembler.Init(self.compile) + return self } func (self *_Assembler) compile() { - self.prologue() - self.instrs() - self.epilogue() - self.builtins() + self.prologue() + self.instrs() + self.epilogue() + self.builtins() } /** Assembler Stages **/ -var _OpFuncTab = [256]func(*_Assembler, *_Instr) { - _OP_null : (*_Assembler)._asm_OP_null, - _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr, - _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj, - _OP_bool : (*_Assembler)._asm_OP_bool, - _OP_i8 : (*_Assembler)._asm_OP_i8, - _OP_i16 : (*_Assembler)._asm_OP_i16, - _OP_i32 : (*_Assembler)._asm_OP_i32, - _OP_i64 : (*_Assembler)._asm_OP_i64, - 
_OP_u8 : (*_Assembler)._asm_OP_u8, - _OP_u16 : (*_Assembler)._asm_OP_u16, - _OP_u32 : (*_Assembler)._asm_OP_u32, - _OP_u64 : (*_Assembler)._asm_OP_u64, - _OP_f32 : (*_Assembler)._asm_OP_f32, - _OP_f64 : (*_Assembler)._asm_OP_f64, - _OP_str : (*_Assembler)._asm_OP_str, - _OP_bin : (*_Assembler)._asm_OP_bin, - _OP_quote : (*_Assembler)._asm_OP_quote, - _OP_number : (*_Assembler)._asm_OP_number, - _OP_eface : (*_Assembler)._asm_OP_eface, - _OP_iface : (*_Assembler)._asm_OP_iface, - _OP_byte : (*_Assembler)._asm_OP_byte, - _OP_text : (*_Assembler)._asm_OP_text, - _OP_deref : (*_Assembler)._asm_OP_deref, - _OP_index : (*_Assembler)._asm_OP_index, - _OP_load : (*_Assembler)._asm_OP_load, - _OP_save : (*_Assembler)._asm_OP_save, - _OP_drop : (*_Assembler)._asm_OP_drop, - _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, - _OP_recurse : (*_Assembler)._asm_OP_recurse, - _OP_is_nil : (*_Assembler)._asm_OP_is_nil, - _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1, - _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1, - _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2, - _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4, - _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8, - _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map, - _OP_goto : (*_Assembler)._asm_OP_goto, - _OP_map_iter : (*_Assembler)._asm_OP_map_iter, - _OP_map_stop : (*_Assembler)._asm_OP_map_stop, - _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key, - _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key, - _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next, - _OP_slice_len : (*_Assembler)._asm_OP_slice_len, - _OP_slice_next : (*_Assembler)._asm_OP_slice_next, - _OP_marshal : (*_Assembler)._asm_OP_marshal, - _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p, - _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text, - _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p, - _OP_cond_set : (*_Assembler)._asm_OP_cond_set, - _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc, +var _OpFuncTab = [256]func(*_Assembler, *_Instr){ + _OP_null: (*_Assembler)._asm_OP_null, + _OP_empty_arr: (*_Assembler)._asm_OP_empty_arr, + _OP_empty_obj: (*_Assembler)._asm_OP_empty_obj, + _OP_bool: (*_Assembler)._asm_OP_bool, + _OP_i8: (*_Assembler)._asm_OP_i8, + _OP_i16: (*_Assembler)._asm_OP_i16, + _OP_i32: (*_Assembler)._asm_OP_i32, + _OP_i64: (*_Assembler)._asm_OP_i64, + _OP_u8: (*_Assembler)._asm_OP_u8, + _OP_u16: (*_Assembler)._asm_OP_u16, + _OP_u32: (*_Assembler)._asm_OP_u32, + _OP_u64: (*_Assembler)._asm_OP_u64, + _OP_f32: (*_Assembler)._asm_OP_f32, + _OP_f64: (*_Assembler)._asm_OP_f64, + _OP_str: (*_Assembler)._asm_OP_str, + _OP_bin: (*_Assembler)._asm_OP_bin, + _OP_quote: (*_Assembler)._asm_OP_quote, + _OP_number: (*_Assembler)._asm_OP_number, + _OP_eface: (*_Assembler)._asm_OP_eface, + _OP_iface: (*_Assembler)._asm_OP_iface, + _OP_byte: (*_Assembler)._asm_OP_byte, + _OP_text: (*_Assembler)._asm_OP_text, + _OP_deref: (*_Assembler)._asm_OP_deref, + _OP_index: (*_Assembler)._asm_OP_index, + _OP_load: (*_Assembler)._asm_OP_load, + _OP_save: (*_Assembler)._asm_OP_save, + _OP_drop: (*_Assembler)._asm_OP_drop, + _OP_drop_2: (*_Assembler)._asm_OP_drop_2, + _OP_recurse: (*_Assembler)._asm_OP_recurse, + _OP_is_nil: (*_Assembler)._asm_OP_is_nil, + _OP_is_nil_p1: (*_Assembler)._asm_OP_is_nil_p1, + _OP_is_zero_1: (*_Assembler)._asm_OP_is_zero_1, + _OP_is_zero_2: (*_Assembler)._asm_OP_is_zero_2, + _OP_is_zero_4: (*_Assembler)._asm_OP_is_zero_4, + _OP_is_zero_8: (*_Assembler)._asm_OP_is_zero_8, + _OP_is_zero_map: (*_Assembler)._asm_OP_is_zero_map, + _OP_goto: 
(*_Assembler)._asm_OP_goto, + _OP_map_iter: (*_Assembler)._asm_OP_map_iter, + _OP_map_stop: (*_Assembler)._asm_OP_map_stop, + _OP_map_check_key: (*_Assembler)._asm_OP_map_check_key, + _OP_map_write_key: (*_Assembler)._asm_OP_map_write_key, + _OP_map_value_next: (*_Assembler)._asm_OP_map_value_next, + _OP_slice_len: (*_Assembler)._asm_OP_slice_len, + _OP_slice_next: (*_Assembler)._asm_OP_slice_next, + _OP_marshal: (*_Assembler)._asm_OP_marshal, + _OP_marshal_p: (*_Assembler)._asm_OP_marshal_p, + _OP_marshal_text: (*_Assembler)._asm_OP_marshal_text, + _OP_marshal_text_p: (*_Assembler)._asm_OP_marshal_text_p, + _OP_cond_set: (*_Assembler)._asm_OP_cond_set, + _OP_cond_testc: (*_Assembler)._asm_OP_cond_testc, } func (self *_Assembler) instr(v *_Instr) { - if fn := _OpFuncTab[v.op()]; fn != nil { - fn(self, v) - } else { - panic(fmt.Sprintf("invalid opcode: %d", v.op())) - } + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } } func (self *_Assembler) instrs() { - for i, v := range self.p { - self.Mark(i) - self.instr(&v) - self.debug_instr(i, &v) - } + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } } func (self *_Assembler) builtins() { - self.more_space() - self.error_too_deep() - self.error_invalid_number() - self.error_nan_or_infinite() - self.go_panic() + self.more_space() + self.error_too_deep() + self.error_invalid_number() + self.error_nan_or_infinite() + self.go_panic() } func (self *_Assembler) epilogue() { - self.Mark(len(self.p)) - self.Emit("XORL", _ET, _ET) - self.Emit("XORL", _EP, _EP) - self.Link(_LB_error) - self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX - self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) - self.Emit("MOVQ", jit.Imm(0), _ARG_rb) // MOVQ AX, rb<>+0(FP) - self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ BX, vp<>+8(FP) - self.Emit("MOVQ", jit.Imm(0), _ARG_sb) // MOVQ CX, sb<>+16(FP) - self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP - self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP - self.Emit("RET") // RET + self.Mark(len(self.p)) + self.Emit("XORL", _ET, _ET) + self.Emit("XORL", _EP, _EP) + self.Link(_LB_error) + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", jit.Imm(0), _ARG_rb) // MOVQ AX, rb<>+0(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ BX, vp<>+8(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sb) // MOVQ CX, sb<>+16(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET } func (self *_Assembler) prologue() { - self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP - self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) - self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP - self.Emit("MOVQ", _AX, _ARG_rb) // MOVQ AX, rb<>+0(FP) - self.Emit("MOVQ", _BX, _ARG_vp) // MOVQ BX, vp<>+8(FP) - self.Emit("MOVQ", _CX, _ARG_sb) // MOVQ CX, sb<>+16(FP) - self.Emit("MOVQ", _DI, _ARG_fv) // MOVQ DI, rb<>+24(FP) - self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX) , DI - self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX) , SI - self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), DX - self.Emit("MOVQ", _BX, _SP_p) // MOVQ BX, R10 - self.Emit("MOVQ", _CX, _ST) // MOVQ CX, R8 - self.Emit("XORL", _SP_x, _SP_x) // XORL R10, R12 - self.Emit("XORL", _SP_f, 
_SP_f) // XORL R11, R13 - self.Emit("XORL", _SP_q, _SP_q) // XORL R13, R11 + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.Emit("MOVQ", _AX, _ARG_rb) // MOVQ AX, rb<>+0(FP) + self.Emit("MOVQ", _BX, _ARG_vp) // MOVQ BX, vp<>+8(FP) + self.Emit("MOVQ", _CX, _ARG_sb) // MOVQ CX, sb<>+16(FP) + self.Emit("MOVQ", _DI, _ARG_fv) // MOVQ DI, rb<>+24(FP) + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX) , DI + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX) , SI + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), DX + self.Emit("MOVQ", _BX, _SP_p) // MOVQ BX, R10 + self.Emit("MOVQ", _CX, _ST) // MOVQ CX, R8 + self.Emit("XORL", _SP_x, _SP_x) // XORL R10, R12 + self.Emit("XORL", _SP_f, _SP_f) // XORL R11, R13 + self.Emit("XORL", _SP_q, _SP_q) // XORL R13, R11 } /** Assembler Inline Functions **/ func (self *_Assembler) xsave(reg ...obj.Addr) { - for i, v := range reg { - if i > _FP_saves / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) - } - } + for i, v := range reg { + if i > _FP_saves/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs+int64(i)*8)) + } + } } func (self *_Assembler) xload(reg ...obj.Addr) { - for i, v := range reg { - if i > _FP_saves / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) - } - } + for i, v := range reg { + if i > _FP_saves/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs+int64(i)*8), v) + } + } } func (self *_Assembler) rbuf_di() { - if _RP.Reg != x86.REG_DI { - panic("register allocation messed up: RP != DI") - } else { - self.Emit("ADDQ", _RL, _RP) - } + if _RP.Reg != x86.REG_DI { + panic("register allocation messed up: RP != DI") + } else { + self.Emit("ADDQ", _RL, _RP) + } } func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) { - self.check_size(nd) - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI - self.call_c(fn) // CALL_C $fn - self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL + self.check_size(nd) + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI + self.call_c(fn) // CALL_C $fn + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) store_str(s string) { - i := 0 - m := rt.Str2Mem(s) - - /* 8-byte stores */ - for i <= len(m) - 8 { - self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX - self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL) - i += 8 - } - - /* 4-byte stores */ - if i <= len(m) - 4 { - self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) - i += 4 - } - - /* 2-byte stores */ - if i <= len(m) - 2 { - self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) - i += 2 - } - - /* last byte */ - if i < len(m) { - self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) - } + i := 0 + m := rt.Str2Mem(s) + + /* 8-byte stores */ + for i <= len(m)-8 { + self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX + self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // 
MOVQ AX, i(RP)(RL) + i += 8 + } + + /* 4-byte stores */ + if i <= len(m)-4 { + self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) + i += 4 + } + + /* 2-byte stores */ + if i <= len(m)-2 { + self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) + i += 2 + } + + /* last byte */ + if i < len(m) { + self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) + } } func (self *_Assembler) check_size(n int) { - self.check_size_rl(jit.Ptr(_RL, int64(n))) + self.check_size_rl(jit.Ptr(_RL, int64(n))) } func (self *_Assembler) check_size_r(r obj.Addr, d int) { - self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) + self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) } func (self *_Assembler) check_size_rl(v obj.Addr) { - idx := self.x - key := _LB_more_space_return + strconv.Itoa(idx) + idx := self.x + key := _LB_more_space_return + strconv.Itoa(idx) - /* the following code relies on LR == R9 to work */ - if _LR.Reg != x86.REG_R9 { - panic("register allocation messed up: LR != R9") - } + /* the following code relies on LR == R9 to work */ + if _LR.Reg != x86.REG_R9 { + panic("register allocation messed up: LR != R9") + } - /* check for buffer capacity */ - self.x++ - self.Emit("LEAQ", v, _AX) // LEAQ $v, AX - self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC - self.Sjmp("JBE" , key) // JBE _more_space_return_{n} - self.slice_grow_ax(key) // GROW $key - self.Link(key) // _more_space_return_{n}: + /* check for buffer capacity */ + self.x++ + self.Emit("LEAQ", v, _AX) // LEAQ $v, AX + self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC + self.Sjmp("JBE", key) // JBE _more_space_return_{n} + self.slice_grow_ax(key) // GROW $key + self.Link(key) // _more_space_return_{n}: } func (self *_Assembler) slice_grow_ax(ret string) { - self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 - self.Sref(ret, 4) // .... &ret - self.Sjmp("JMP" , _LB_more_space) // JMP _more_space + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 + self.Sref(ret, 4) // .... 
&ret + self.Sjmp("JMP", _LB_more_space) // JMP _more_space } /** State Stack Helpers **/ const ( - _StateSize = int64(unsafe.Sizeof(_State{})) - _StackLimit = _MaxStack * _StateSize + _StateSize = int64(unsafe.Sizeof(_State{})) + _StackLimit = _MaxStack * _StateSize ) func (self *_Assembler) save_state() { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX - self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R9) // LEAQ _StateSize(CX), R9 - self.Emit("CMPQ", _R9, jit.Imm(_StackLimit)) // CMPQ R9, $_StackLimit - self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep - self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) - self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) - self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) - self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) - self.Emit("MOVQ", _R9, jit.Ptr(_ST, 0)) // MOVQ R9, (ST) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R9) // LEAQ _StateSize(CX), R9 + self.Emit("CMPQ", _R9, jit.Imm(_StackLimit)) // CMPQ R9, $_StackLimit + self.Sjmp("JAE", _LB_error_too_deep) // JA _error_too_deep + self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) + self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) + self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) + self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) + self.Emit("MOVQ", _R9, jit.Ptr(_ST, 0)) // MOVQ R9, (ST) } func (self *_Assembler) drop_state(decr int64) { - self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX - self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p - self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q - self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ", jit.Imm(decr), _AX) // SUBQ $decr, AX + self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q + self.Emit("PXOR", _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) } /** Buffer Helpers **/ func (self *_Assembler) add_char(ch byte) { - self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) - self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL + self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL } func (self *_Assembler) add_long(ch uint32, n int64) { - self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) - self.Emit("ADDQ", jit.Imm(n), _RL) // 
ADDQ $n, RL + self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL } func (self *_Assembler) add_text(ss string) { - self.store_str(ss) // TEXT $ss - self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL + self.store_str(ss) // TEXT $ss + self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL } // get *buf at AX func (self *_Assembler) prep_buffer_AX() { - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) } func (self *_Assembler) save_buffer() { - self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX - self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) - self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) - self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) } // get *buf at AX func (self *_Assembler) load_buffer_AX() { - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP - self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL - self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC } /** Function Interface Helpers **/ func (self *_Assembler) call(pc obj.Addr) { - self.Emit("MOVQ", pc, _LR) // MOVQ $pc, AX - self.Rjmp("CALL", _LR) // CALL AX + self.Emit("MOVQ", pc, _LR) // MOVQ $pc, AX + self.Rjmp("CALL", _LR) // CALL AX } func (self *_Assembler) save_c() { - self.xsave(_REG_ffi...) // SAVE $REG_ffi + self.xsave(_REG_ffi...) // SAVE $REG_ffi } func (self *_Assembler) call_b64(pc obj.Addr) { - self.xsave(_REG_b64...) // SAVE $REG_all - self.call(pc) // CALL $pc - self.xload(_REG_b64...) // LOAD $REG_ffi + self.xsave(_REG_b64...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_b64...) // LOAD $REG_ffi } func (self *_Assembler) call_c(pc obj.Addr) { - self.Emit("XCHGQ", _SP_p, _BX) - self.Emit("XCHGQ", _SP_q, _BP) - self.call(pc) // CALL $pc - self.xload(_REG_ffi...) // LOAD $REG_ffi - self.Emit("XCHGQ", _SP_p, _BX) - self.Emit("XCHGQ", _SP_q, _BP) + self.Emit("XCHGQ", _SP_p, _BX) + self.Emit("XCHGQ", _SP_q, _BP) + self.call(pc) // CALL $pc + self.xload(_REG_ffi...) // LOAD $REG_ffi + self.Emit("XCHGQ", _SP_p, _BX) + self.Emit("XCHGQ", _SP_q, _BP) } func (self *_Assembler) call_go(pc obj.Addr) { - self.xsave(_REG_all...) // SAVE $REG_all - self.call(pc) // CALL $pc - self.xload(_REG_all...) // LOAD $REG_all + self.xsave(_REG_all...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_all...) // LOAD $REG_all } func (self *_Assembler) call_more_space(pc obj.Addr) { - self.xsave(_REG_ms...) // SAVE $REG_all - self.call(pc) // CALL $pc - self.xload(_REG_ms...) // LOAD $REG_all + self.xsave(_REG_ms...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_ms...) // LOAD $REG_all } func (self *_Assembler) call_encoder(pc obj.Addr) { - self.xsave(_REG_enc...) 
// SAVE $REG_all - self.call(pc) // CALL $pc - self.xload(_REG_enc...) // LOAD $REG_all + self.xsave(_REG_enc...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_enc...) // LOAD $REG_all } func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) { - switch vt.Kind() { - case reflect.Interface : self.call_marshaler_i(fn, it) - case reflect.Ptr, reflect.Map : self.call_marshaler_v(fn, it, vt, true) - // struct/array of 1 direct iface type can be direct - default : self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect()) - } + switch vt.Kind() { + case reflect.Interface: + self.call_marshaler_i(fn, it) + case reflect.Ptr, reflect.Map: + self.call_marshaler_v(fn, it, vt, true) + // struct/array of 1 direct iface type can be direct + default: + self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect()) + } } func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} - self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX - self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX - self.call_go(_F_assertI2I) // CALL_GO assertI2I - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} - self.Emit("MOVQ", _BX, _CX) // MOVQ BX, CX - self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX - self.prep_buffer_AX() - self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI - self.call_go(fn) // CALL $fn - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.load_buffer_AX() - self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} - self.Link("_null_{n}") // _null_{n}: - self.check_size(4) // SIZE $4 - self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL - self.Link("_done_{n}") // _done_{n}: + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX + self.Emit("MOVQ", jit.Gtype(it), _AX) // MOVQ $it, AX + self.call_go(_F_assertI2I) // CALL_GO assertI2I + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ", _BX, _CX) // MOVQ BX, CX + self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX + self.prep_buffer_AX() + self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI + self.call_go(fn) // CALL $fn + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.load_buffer_AX() + self.Sjmp("JMP", "_done_{n}") // JMP _done_{n} + self.Link("_null_{n}") // _null_{n}: + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Link("_done_{n}") // _done_{n}: } func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) { - self.prep_buffer_AX() // MOVE {buf}, (SP) - self.Emit("MOVQ", jit.Itab(it, vt), _BX) // MOVQ $(itab(it, vt)), BX + self.prep_buffer_AX() // MOVE {buf}, (SP) + self.Emit("MOVQ", jit.Itab(it, vt), _BX) // MOVQ $(itab(it, vt)), BX - /* dereference the pointer if needed */ - if !deref { - self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX - } 
else { - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ 0(SP.p), CX - } + /* dereference the pointer if needed */ + if !deref { + self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX + } else { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ 0(SP.p), CX + } - /* call the encoder, and perform error checks */ - self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI - self.call_go(fn) // CALL $fn - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.load_buffer_AX() + /* call the encoder, and perform error checks */ + self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI + self.call_go(fn) // CALL $fn + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.load_buffer_AX() } /** Builtin: _more_space **/ var ( - _T_byte = jit.Type(byteType) - _F_growslice = jit.Func(growslice) + _T_byte = jit.Type(byteType) + _F_growslice = jit.Func(growslice) ) -// AX must saving n +// AX must saving n func (self *_Assembler) more_space() { - self.Link(_LB_more_space) - self.Emit("MOVQ", _RP, _BX) // MOVQ DI, BX - self.Emit("MOVQ", _RL, _CX) // MOVQ SI, CX - self.Emit("MOVQ", _RC, _DI) // MOVQ DX, DI - self.Emit("MOVQ", _AX, _SI) // MOVQ AX, SI - self.Emit("MOVQ", _T_byte, _AX) // MOVQ $_T_byte, AX - self.call_more_space(_F_growslice) // CALL $pc - self.Emit("MOVQ", _AX, _RP) // MOVQ AX, DI - self.Emit("MOVQ", _BX, _RL) // MOVQ BX, SI - self.Emit("MOVQ", _CX, _RC) // MOVQ CX, DX - self.save_buffer() // SAVE {buf} - self.Rjmp("JMP" , _LR) // JMP LR + self.Link(_LB_more_space) + self.Emit("MOVQ", _RP, _BX) // MOVQ DI, BX + self.Emit("MOVQ", _RL, _CX) // MOVQ SI, CX + self.Emit("MOVQ", _RC, _DI) // MOVQ DX, DI + self.Emit("MOVQ", _AX, _SI) // MOVQ AX, SI + self.Emit("MOVQ", _T_byte, _AX) // MOVQ $_T_byte, AX + self.call_more_space(_F_growslice) // CALL $pc + self.Emit("MOVQ", _AX, _RP) // MOVQ AX, DI + self.Emit("MOVQ", _BX, _RL) // MOVQ BX, SI + self.Emit("MOVQ", _CX, _RC) // MOVQ CX, DX + self.save_buffer() // SAVE {buf} + self.Rjmp("JMP", _LR) // JMP LR } /** Builtin Errors **/ var ( - _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep)))) - _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite)))) - _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType) + _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep)))) + _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite)))) + _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType) ) func (self *_Assembler) error_too_deep() { - self.Link(_LB_error_too_deep) - self.Emit("MOVQ", _V_ERR_too_deep, _EP) // MOVQ $_V_ERR_too_deep, EP - self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_error_too_deep) + self.Emit("MOVQ", _V_ERR_too_deep, _EP) // MOVQ $_V_ERR_too_deep, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET + self.Sjmp("JMP", _LB_error) // JMP _error } func (self *_Assembler) error_invalid_number() { - self.Link(_LB_error_invalid_number) - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ 0(SP), AX - self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP), BX - self.call_go(_F_error_number) // CALL_GO error_number - self.Sjmp("JMP" , _LB_error) // JMP _error + self.Link(_LB_error_invalid_number) + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ 0(SP), AX + 
self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP), BX + self.call_go(_F_error_number) // CALL_GO error_number + self.Sjmp("JMP", _LB_error) // JMP _error } -func (self *_Assembler) error_nan_or_infinite() { - self.Link(_LB_error_nan_or_infinite) - self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP) // MOVQ $_V_ERR_nan_or_infinite, EP - self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET - self.Sjmp("JMP" , _LB_error) // JMP _error +func (self *_Assembler) error_nan_or_infinite() { + self.Link(_LB_error_nan_or_infinite) + self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP) // MOVQ $_V_ERR_nan_or_infinite, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET + self.Sjmp("JMP", _LB_error) // JMP _error } /** String Encoding Routine **/ var ( - _F_quote = jit.Imm(int64(native.S_quote)) - _F_panic = jit.Func(goPanic) + _F_quote = jit.Imm(int64(native.S_quote)) + _F_panic = jit.Func(goPanic) ) func (self *_Assembler) go_panic() { - self.Link(_LB_panic) - self.Emit("MOVQ", _SP_p, _BX) - self.call_go(_F_panic) -} - -func (self *_Assembler) encode_string(doubleQuote bool) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JZ" , "_str_empty_{n}") // JZ _str_empty_{n} - self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) - self.Sjmp("JNE" , "_str_next_{n}") - self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) - self.Sjmp("JMP", _LB_panic) - self.Link("_str_next_{n}") - - /* openning quote, check for double quote */ - if !doubleQuote { - self.check_size_r(_AX, 2) // SIZE $2 - self.add_char('"') // CHAR $'"' - } else { - self.check_size_r(_AX, 6) // SIZE $6 - self.add_long(_IM_open, 3) // TEXT $`"\"` - } - - /* quoting loop */ - self.Emit("XORL", _AX, _AX) // XORL AX, AX - self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp - self.Link("_str_loop_{n}") // _str_loop_{n}: - self.save_c() // SAVE $REG_ffi - - /* load the output buffer first, and then input buffer, - * because the parameter registers collide with RP / RL / RC */ - self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX - self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX - self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn - self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX - self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX - self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI - self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI - self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI - self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI - - /* set the flags based on `doubleQuote` */ - if !doubleQuote { - self.Emit("XORL", _R8, _R8) // XORL R8, R8 - } else { - self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 - } - - /* call the native quoter */ - self.call_c(_F_quote) // CALL quote - self.Emit("ADDQ" , _VAR_dn, _RL) // ADDQ dn, RL - - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n} - - /* close the string, check for double quote */ - if !doubleQuote { - self.check_size(1) // SIZE $1 - self.add_char('"') // CHAR $'"' - self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} - } else { - self.check_size(3) // SIZE $3 - self.add_text("\\\"\"") // TEXT $'\""' - self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} - } - - /* not enough space to contain the quoted string */ - self.Link("_str_space_{n}") // _str_space_{n}: - 
self.Emit("NOTQ", _AX) // NOTQ AX - self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp - self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX - self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n} - - /* empty string, check for double quote */ - if !doubleQuote { - self.Link("_str_empty_{n}") // _str_empty_{n}: - self.check_size(2) // SIZE $2 - self.add_text("\"\"") // TEXT $'""' - self.Link("_str_end_{n}") // _str_end_{n}: - } else { - self.Link("_str_empty_{n}") // _str_empty_{n}: - self.check_size(6) // SIZE $6 - self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""' - self.Link("_str_end_{n}") // _str_end_{n}: - } + self.Link(_LB_panic) + self.Emit("MOVQ", _SP_p, _BX) + self.call_go(_F_panic) +} + +func (self *_Assembler) encode_string(doubleQuote bool) { + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ", "_str_empty_{n}") // JZ _str_empty_{n} + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) + self.Sjmp("JNE", "_str_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) + self.Sjmp("JMP", _LB_panic) + self.Link("_str_next_{n}") + + /* openning quote, check for double quote */ + if !doubleQuote { + self.check_size_r(_AX, 2) // SIZE $2 + self.add_char('"') // CHAR $'"' + } else { + self.check_size_r(_AX, 6) // SIZE $6 + self.add_long(_IM_open, 3) // TEXT $`"\"` + } + + /* quoting loop */ + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp + self.Link("_str_loop_{n}") // _str_loop_{n}: + self.save_c() // SAVE $REG_ffi + + /* load the output buffer first, and then input buffer, + * because the parameter registers collide with RP / RL / RC */ + self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX + self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX + self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn + self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX + self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX + self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI + self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI + self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI + + /* set the flags based on `doubleQuote` */ + if !doubleQuote { + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + } else { + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + } + + /* call the native quoter */ + self.call_c(_F_quote) // CALL quote + self.Emit("ADDQ", _VAR_dn, _RL) // ADDQ dn, RL + + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS", "_str_space_{n}") // JS _str_space_{n} + + /* close the string, check for double quote */ + if !doubleQuote { + self.check_size(1) // SIZE $1 + self.add_char('"') // CHAR $'"' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } else { + self.check_size(3) // SIZE $3 + self.add_text("\\\"\"") // TEXT $'\""' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } + + /* not enough space to contain the quoted string */ + self.Link("_str_space_{n}") // _str_space_{n}: + self.Emit("NOTQ", _AX) // NOTQ AX + self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp + self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX + self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n} + + /* empty string, check for double quote */ + if !doubleQuote { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(2) // SIZE $2 + self.add_text("\"\"") // TEXT $'""' + 
self.Link("_str_end_{n}") // _str_end_{n}: + } else { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(6) // SIZE $6 + self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""' + self.Link("_str_end_{n}") // _str_end_{n}: + } } /** OpCode Assembler Functions **/ var ( - _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) - _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) + _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) + _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) ) var ( - _F_f64toa = jit.Imm(int64(native.S_f64toa)) - _F_f32toa = jit.Imm(int64(native.S_f32toa)) - _F_i64toa = jit.Imm(int64(native.S_i64toa)) - _F_u64toa = jit.Imm(int64(native.S_u64toa)) - _F_b64encode = jit.Imm(int64(_subr__b64encode)) + _F_f64toa = jit.Imm(int64(native.S_f64toa)) + _F_f32toa = jit.Imm(int64(native.S_f32toa)) + _F_i64toa = jit.Imm(int64(native.S_i64toa)) + _F_u64toa = jit.Imm(int64(native.S_u64toa)) + _F_b64encode = jit.Imm(int64(_subr__b64encode)) ) var ( - _F_memmove = jit.Func(memmove) - _F_error_number = jit.Func(error_number) - _F_isValidNumber = jit.Func(isValidNumber) + _F_memmove = jit.Func(memmove) + _F_error_number = jit.Func(error_number) + _F_isValidNumber = jit.Func(isValidNumber) ) var ( - _F_iteratorStop = jit.Func(iteratorStop) - _F_iteratorNext = jit.Func(iteratorNext) - _F_iteratorStart = jit.Func(iteratorStart) + _F_iteratorStop = jit.Func(iteratorStop) + _F_iteratorNext = jit.Func(iteratorNext) + _F_iteratorStart = jit.Func(iteratorStart) ) var ( - _F_encodeTypedPointer obj.Addr - _F_encodeJsonMarshaler obj.Addr - _F_encodeTextMarshaler obj.Addr + _F_encodeTypedPointer obj.Addr + _F_encodeJsonMarshaler obj.Addr + _F_encodeTextMarshaler obj.Addr ) const ( - _MODE_AVX2 = 1 << 2 + _MODE_AVX2 = 1 << 2 ) func init() { - _F_encodeTypedPointer = jit.Func(encodeTypedPointer) - _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) - _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) + _F_encodeTypedPointer = jit.Func(encodeTypedPointer) + _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) + _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) } func (self *_Assembler) _asm_OP_null(_ *_Instr) { - self.check_size(4) - self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.check_size(4) + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL } func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) { - self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) - self.Sjmp("JC", "_empty_arr_{n}") - self._asm_OP_null(nil) - self.Sjmp("JMP", "_empty_arr_end_{n}") - self.Link("_empty_arr_{n}") - self.check_size(2) - self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) - self.Emit("ADDQ", jit.Imm(2), _RL) - self.Link("_empty_arr_end_{n}") + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_arr_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_arr_end_{n}") + self.Link("_empty_arr_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_arr_end_{n}") } func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) { - self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) - self.Sjmp("JC", "_empty_obj_{n}") - self._asm_OP_null(nil) - self.Sjmp("JMP", "_empty_obj_end_{n}") - self.Link("_empty_obj_{n}") - 
self.check_size(2) - self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) - self.Emit("ADDQ", jit.Imm(2), _RL) - self.Link("_empty_obj_end_{n}") + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_obj_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_obj_end_{n}") + self.Link("_empty_obj_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_obj_end_{n}") } func (self *_Assembler) _asm_OP_bool(_ *_Instr) { - self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 - self.Sjmp("JE" , "_false_{n}") // JE _false_{n} - self.check_size(4) // SIZE $4 - self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL - self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} - self.Link("_false_{n}") // _false_{n}: - self.check_size(5) // SIZE $5 - self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) - self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) - self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL - self.Link("_end_{n}") // _end_{n}: + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Sjmp("JE", "_false_{n}") // JE _false_{n} + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Sjmp("JMP", "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.check_size(5) // SIZE $5 + self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) + self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) + self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL + self.Link("_end_{n}") // _end_{n}: } func (self *_Assembler) _asm_OP_i8(_ *_Instr) { - self.store_int(4, _F_i64toa, "MOVBQSX") + self.store_int(4, _F_i64toa, "MOVBQSX") } func (self *_Assembler) _asm_OP_i16(_ *_Instr) { - self.store_int(6, _F_i64toa, "MOVWQSX") + self.store_int(6, _F_i64toa, "MOVWQSX") } func (self *_Assembler) _asm_OP_i32(_ *_Instr) { - self.store_int(17, _F_i64toa, "MOVLQSX") + self.store_int(17, _F_i64toa, "MOVLQSX") } func (self *_Assembler) _asm_OP_i64(_ *_Instr) { - self.store_int(21, _F_i64toa, "MOVQ") + self.store_int(21, _F_i64toa, "MOVQ") } func (self *_Assembler) _asm_OP_u8(_ *_Instr) { - self.store_int(3, _F_u64toa, "MOVBQZX") + self.store_int(3, _F_u64toa, "MOVBQZX") } func (self *_Assembler) _asm_OP_u16(_ *_Instr) { - self.store_int(5, _F_u64toa, "MOVWQZX") + self.store_int(5, _F_u64toa, "MOVWQZX") } func (self *_Assembler) _asm_OP_u32(_ *_Instr) { - self.store_int(16, _F_u64toa, "MOVLQZX") + self.store_int(16, _F_u64toa, "MOVLQZX") } func (self *_Assembler) _asm_OP_u64(_ *_Instr) { - self.store_int(20, _F_u64toa, "MOVQ") + self.store_int(20, _F_u64toa, "MOVQ") } func (self *_Assembler) _asm_OP_f32(_ *_Instr) { - self.check_size(32) - self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX - self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX - self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX - self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 - self.call_c(_F_f32toa) // CALL_C f64toa - self.Emit("ADDQ" , _AX, _RL) 
// ADDQ AX, RL + self.check_size(32) + self.Emit("MOVL", jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX + self.Emit("ANDL", jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX + self.Emit("XORL", jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX + self.Sjmp("JZ", _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSS", jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 + self.call_c(_F_f32toa) // CALL_C f64toa + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) _asm_OP_f64(_ *_Instr) { - self.check_size(32) - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX - self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX - self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX - self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite - self.save_c() // SAVE $C_regs - self.rbuf_di() // MOVQ RP, DI - self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 - self.call_c(_F_f64toa) // CALL_C f64toa - self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL + self.check_size(32) + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX + self.Emit("ANDQ", _CX, _AX) // ANDQ CX, AX + self.Emit("XORQ", _CX, _AX) // XORQ CX, AX + self.Sjmp("JZ", _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSD", jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 + self.call_c(_F_f64toa) // CALL_C f64toa + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL } func (self *_Assembler) _asm_OP_str(_ *_Instr) { - self.encode_string(false) + self.encode_string(false) } func (self *_Assembler) _asm_OP_bin(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX - self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX - self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX - self.Emit("MOVQ", _DX, _BX) // MOVQ DX, BX - self.From("MULQ", _CX) // MULQ CX - self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX - self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX - self.Emit("MOVQ", _BX, _DX) // MOVQ BX, DX - self.check_size_r(_AX, 0) // SIZE AX - self.add_char('"') // CHAR $'"' - self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI - self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ SI, 8(DI) - self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI - - /* check for AVX2 support */ - if !cpu.HasAVX2 { - self.Emit("XORL", _DX, _DX) // XORL DX, DX - } else { - self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX - } - - /* call the encoder */ - self.call_b64(_F_b64encode) // CALL b64encode - self.load_buffer_AX() // LOAD {buf} - self.add_char('"') // CHAR $'"' + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX + self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX + self.Emit("MOVQ", _DX, _BX) // MOVQ DX, BX + self.From("MULQ", _CX) // MULQ CX + self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX + self.Emit("ORQ", jit.Imm(2), _AX) // ORQ $2, AX + self.Emit("MOVQ", _BX, _DX) // MOVQ BX, DX + self.check_size_r(_AX, 0) // SIZE AX + self.add_char('"') // CHAR $'"' + self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI + self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ SI, 8(DI) + self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI + + /* check for AVX2 support */ + if !cpu.HasAVX2 { + self.Emit("XORL", _DX, 
_DX) // XORL DX, DX + } else { + self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX + } + + /* call the encoder */ + self.call_b64(_F_b64encode) // CALL b64encode + self.load_buffer_AX() // LOAD {buf} + self.add_char('"') // CHAR $'"' } func (self *_Assembler) _asm_OP_quote(_ *_Instr) { - self.encode_string(true) + self.encode_string(true) } func (self *_Assembler) _asm_OP_number(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX - self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX - self.Sjmp("JZ" , "_empty_{n}") - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Sjmp("JNZ" , "_number_next_{n}") - self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) - self.Sjmp("JMP", _LB_panic) - self.Link("_number_next_{n}") - self.call_go(_F_isValidNumber) // CALL_GO isValidNumber - self.Emit("CMPB" , _AX, jit.Imm(0)) // CMPB AX, $0 - self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX - self.check_size_r(_BX, 0) // SIZE BX - self.Emit("LEAQ" , jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX - self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVOU (SP.p), BX - self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVOU X0, 8(SP) - self.call_go(_F_memmove) // CALL_GO memmove - self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX - self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) - self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} - self.Link("_empty_{n}") // _empty_{n} - self.check_size(1) // SIZE $1 - self.add_char('0') // CHAR $'0' - self.Link("_done_{n}") // _done_{n}: + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JZ", "_empty_{n}") + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ", "_number_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) + self.Sjmp("JMP", _LB_panic) + self.Link("_number_next_{n}") + self.call_go(_F_isValidNumber) // CALL_GO isValidNumber + self.Emit("CMPB", _AX, jit.Imm(0)) // CMPB AX, $0 + self.Sjmp("JE", _LB_error_invalid_number) // JE _error_invalid_number + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX + self.check_size_r(_BX, 0) // SIZE BX + self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX + self.Emit("ADDQ", jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVOU (SP.p), BX + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVOU X0, 8(SP) + self.call_go(_F_memmove) // CALL_GO memmove + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Sjmp("JMP", "_done_{n}") // JMP _done_{n} + self.Link("_empty_{n}") // _empty_{n} + self.check_size(1) // SIZE $1 + self.add_char('0') // CHAR $'0' + self.Link("_done_{n}") // _done_{n}: } func (self *_Assembler) _asm_OP_eface(_ *_Instr) { - self.prep_buffer_AX() // MOVE {buf}, AX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX - self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX - self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI - self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, AX - self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // 
JNZ _error - self.load_buffer_AX() + self.prep_buffer_AX() // MOVE {buf}, AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX + self.Emit("LEAQ", jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX + self.Emit("MOVQ", _ST, _DI) // MOVQ ST, DI + self.Emit("MOVQ", _ARG_fv, _SI) // MOVQ fv, AX + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.load_buffer_AX() } func (self *_Assembler) _asm_OP_iface(_ *_Instr) { - self.prep_buffer_AX() // MOVE {buf}, AX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX - self.Emit("MOVQ" , jit.Ptr(_CX, 8), _BX) // MOVQ 8(CX), BX - self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX - self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI - self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, AX - self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.load_buffer_AX() + self.prep_buffer_AX() // MOVE {buf}, AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX + self.Emit("MOVQ", jit.Ptr(_CX, 8), _BX) // MOVQ 8(CX), BX + self.Emit("LEAQ", jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX + self.Emit("MOVQ", _ST, _DI) // MOVQ ST, DI + self.Emit("MOVQ", _ARG_fv, _SI) // MOVQ fv, AX + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.load_buffer_AX() } func (self *_Assembler) _asm_OP_byte(p *_Instr) { - self.check_size(1) - self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1) - self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL + self.check_size(1) + self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL } func (self *_Assembler) _asm_OP_text(p *_Instr) { - self.check_size(len(p.vs())) // SIZE ${len(p.vs())} - self.add_text(p.vs()) // TEXT ${p.vs()} + self.check_size(len(p.vs())) // SIZE ${len(p.vs())} + self.add_text(p.vs()) // TEXT ${p.vs()} } func (self *_Assembler) _asm_OP_deref(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p } func (self *_Assembler) _asm_OP_index(p *_Instr) { - self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX - self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX + self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p } func (self *_Assembler) _asm_OP_load(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p - self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q } func (self *_Assembler) _asm_OP_save(_ *_Instr) { - self.save_state() + self.save_state() } func (self *_Assembler) _asm_OP_drop(_ *_Instr) { - self.drop_state(_StateSize) + self.drop_state(_StateSize) } func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { - 
self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) - self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) + self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) } func (self *_Assembler) _asm_OP_recurse(p *_Instr) { - self.prep_buffer_AX() // MOVE {buf}, (SP) - vt, pv := p.vp() - self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ $(type(p.vt())), BX - - /* check for indirection */ - if !rt.UnpackType(vt).Indirect() { - self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX - } else { - self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, VAR.vp - self.Emit("LEAQ", _VAR_vp, _CX) // LEAQ VAR.vp, CX - } - - /* call the encoder */ - self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI - self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ $fv, SI - if pv { - self.Emit("BTCQ", jit.Imm(bitPointerValue), _SI) // BTCQ $1, SI - } - self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error - self.load_buffer_AX() + self.prep_buffer_AX() // MOVE {buf}, (SP) + vt, pv := p.vp() + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ $(type(p.vt())), BX + + /* check for indirection */ + if !rt.UnpackType(vt).Indirect() { + self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX + } else { + self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, VAR.vp + self.Emit("LEAQ", _VAR_vp, _CX) // LEAQ VAR.vp, CX + } + + /* call the encoder */ + self.Emit("MOVQ", _ST, _DI) // MOVQ ST, DI + self.Emit("MOVQ", _ARG_fv, _SI) // MOVQ $fv, SI + if pv { + self.Emit("BTCQ", jit.Imm(bitPointerValue), _SI) // BTCQ $1, SI + } + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error + self.load_buffer_AX() } func (self *_Assembler) _asm_OP_is_nil(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_1(p *_Instr) { - self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) { - self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) { - self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) { - self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_is_zero_map(p 
*_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX - self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX - self.Xjmp("JZ" , p.vi()) // JZ p.vi() - self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 - self.Xjmp("JE" , p.vi()) // JE p.vi() + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Xjmp("JZ", p.vi()) // JZ p.vi() + self.Emit("CMPQ", jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 + self.Xjmp("JE", p.vi()) // JE p.vi() } func (self *_Assembler) _asm_OP_goto(p *_Instr) { - self.Xjmp("JMP", p.vi()) + self.Xjmp("JMP", p.vi()) } func (self *_Assembler) _asm_OP_map_iter(p *_Instr) { - self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX - self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX - self.call_go(_F_iteratorStart) // CALL_GO iteratorStart - self.Emit("MOVQ" , _AX, _SP_q) // MOVQ AX, SP.q - self.Emit("MOVQ" , _BX, _ET) // MOVQ 32(SP), ET - self.Emit("MOVQ" , _CX, _EP) // MOVQ 40(SP), EP - self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET - self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX + self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ fv, CX + self.call_go(_F_iteratorStart) // CALL_GO iteratorStart + self.Emit("MOVQ", _AX, _SP_q) // MOVQ AX, SP.q + self.Emit("MOVQ", _BX, _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", _CX, _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_error) // JNZ _error } func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) { - self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX - self.call_go(_F_iteratorStop) // CALL_GO iteratorStop - self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q + self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX + self.call_go(_F_iteratorStop) // CALL_GO iteratorStop + self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q } func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p - self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p - self.Xjmp("JZ" , p.vi()) // JNZ p.vi() + self.Emit("MOVQ", jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p + self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p + self.Xjmp("JZ", p.vi()) // JNZ p.vi() } func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) { - self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv - self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n} - self.encode_string(false) // STR $false - self.Xjmp("JMP", p.vi()) // JMP ${p.vi()} - self.Link("_unordered_key_{n}") // _unordered_key_{n}: + self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv + self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n} + self.encode_string(false) // STR $false + self.Xjmp("JMP", p.vi()) // JMP ${p.vi()} + self.Link("_unordered_key_{n}") // _unordered_key_{n}: } func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) { - self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p - self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX - self.call_go(_F_iteratorNext) // CALL_GO iteratorNext + self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p + self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX + self.call_go(_F_iteratorNext) // CALL_GO iteratorNext } func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) { - self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 
8(SP.p), SP.x - self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p - self.Emit("ORQ" , jit.Imm(1 << _S_init), _SP_f) // ORQ $(1<<_S_init), SP.f + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 8(SP.p), SP.x + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p + self.Emit("ORQ", jit.Imm(1<<_S_init), _SP_f) // ORQ $(1<<_S_init), SP.f } func (self *_Assembler) _asm_OP_slice_next(p *_Instr) { - self.Emit("TESTQ" , _SP_x, _SP_x) // TESTQ SP.x, SP.x - self.Xjmp("JZ" , p.vi()) // JZ p.vi() - self.Emit("SUBQ" , jit.Imm(1), _SP_x) // SUBQ $1, SP.x - self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f - self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX - self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p + self.Emit("TESTQ", _SP_x, _SP_x) // TESTQ SP.x, SP.x + self.Xjmp("JZ", p.vi()) // JZ p.vi() + self.Emit("SUBQ", jit.Imm(1), _SP_x) // SUBQ $1, SP.x + self.Emit("BTRQ", jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f + self.Emit("LEAQ", jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX + self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p } func (self *_Assembler) _asm_OP_marshal(p *_Instr) { - self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt()) + self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt()) } func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) { - if p.vk() != reflect.Ptr { - panic("marshal_p: invalid type") - } else { - self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false) - } + if p.vk() != reflect.Ptr { + panic("marshal_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false) + } } func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) { - self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt()) + self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt()) } func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) { - if p.vk() != reflect.Ptr { - panic("marshal_text_p: invalid type") - } else { - self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false) - } + if p.vk() != reflect.Ptr { + panic("marshal_text_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false) + } } func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) { - self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f + self.Emit("ORQ", jit.Imm(1<<_S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f } func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) { - self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f - self.Xjmp("JC" , p.vi()) + self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f + self.Xjmp("JC", p.vi()) } func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { - self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX) // MOVQ $(p2.op()), AX - self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), BX - self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), CX - self.call_go(_F_println) + self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX) // MOVQ $(p2.op()), AX + self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), BX + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), CX + self.call_go(_F_println) } var ( - _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) + _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) - _F_gcWriteBarrierAX = 
jit.Func(gcWriteBarrierAX) + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) ) func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) { - if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { - panic("rec contains AX!") - } - self.Emit("MOVQ", _V_writeBarrier, _BX) - self.Emit("CMPL", jit.Ptr(_BX, 0), jit.Imm(0)) - self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.xsave(_DI) - self.Emit("MOVQ", ptr, _AX) - self.Emit("LEAQ", rec, _DI) - self.Emit("MOVQ", _F_gcWriteBarrierAX, _BX) // MOVQ ${fn}, AX - self.Rjmp("CALL", _BX) - self.xload(_DI) - self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") - self.Emit("MOVQ", ptr, rec) - self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") -} \ No newline at end of file + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _BX) + self.Emit("CMPL", jit.Ptr(_BX, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.xsave(_DI) + self.Emit("MOVQ", ptr, _AX) + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _BX) // MOVQ ${fn}, AX + self.Rjmp("CALL", _BX) + self.xload(_DI) + self.Sjmp("JMP", "_end_writeBarrier"+strconv.Itoa(i)+"_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/compiler.go b/vendor/github.com/bytedance/sonic/internal/encoder/compiler.go index a949c90f7..58416900a 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/compiler.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/compiler.go @@ -17,298 +17,344 @@ package encoder import ( - `fmt` - `reflect` - `strconv` - `strings` - `unsafe` - - `github.com/bytedance/sonic/internal/resolver` - `github.com/bytedance/sonic/internal/rt` - `github.com/bytedance/sonic/option` + "fmt" + "reflect" + "strconv" + "strings" + "unsafe" + + "github.com/bytedance/sonic/internal/resolver" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" ) type _Op uint8 const ( - _OP_null _Op = iota + 1 - _OP_empty_arr - _OP_empty_obj - _OP_bool - _OP_i8 - _OP_i16 - _OP_i32 - _OP_i64 - _OP_u8 - _OP_u16 - _OP_u32 - _OP_u64 - _OP_f32 - _OP_f64 - _OP_str - _OP_bin - _OP_quote - _OP_number - _OP_eface - _OP_iface - _OP_byte - _OP_text - _OP_deref - _OP_index - _OP_load - _OP_save - _OP_drop - _OP_drop_2 - _OP_recurse - _OP_is_nil - _OP_is_nil_p1 - _OP_is_zero_1 - _OP_is_zero_2 - _OP_is_zero_4 - _OP_is_zero_8 - _OP_is_zero_map - _OP_goto - _OP_map_iter - _OP_map_stop - _OP_map_check_key - _OP_map_write_key - _OP_map_value_next - _OP_slice_len - _OP_slice_next - _OP_marshal - _OP_marshal_p - _OP_marshal_text - _OP_marshal_text_p - _OP_cond_set - _OP_cond_testc + _OP_null _Op = iota + 1 + _OP_empty_arr + _OP_empty_obj + _OP_bool + _OP_i8 + _OP_i16 + _OP_i32 + _OP_i64 + _OP_u8 + _OP_u16 + _OP_u32 + _OP_u64 + _OP_f32 + _OP_f64 + _OP_str + _OP_bin + _OP_quote + _OP_number + _OP_eface + _OP_iface + _OP_byte + _OP_text + _OP_deref + _OP_index + _OP_load + _OP_save + _OP_drop + _OP_drop_2 + _OP_recurse + _OP_is_nil + _OP_is_nil_p1 + _OP_is_zero_1 + _OP_is_zero_2 + _OP_is_zero_4 + _OP_is_zero_8 + _OP_is_zero_map + _OP_goto + _OP_map_iter + _OP_map_stop + _OP_map_check_key + _OP_map_write_key + _OP_map_value_next + _OP_slice_len + _OP_slice_next + _OP_marshal + _OP_marshal_p + 
_OP_marshal_text + _OP_marshal_text_p + _OP_cond_set + _OP_cond_testc ) const ( - _INT_SIZE = 32 << (^uint(0) >> 63) - _PTR_SIZE = 32 << (^uintptr(0) >> 63) - _PTR_BYTE = unsafe.Sizeof(uintptr(0)) + _INT_SIZE = 32 << (^uint(0) >> 63) + _PTR_SIZE = 32 << (^uintptr(0) >> 63) + _PTR_BYTE = unsafe.Sizeof(uintptr(0)) ) const ( - _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions - _MAX_FIELDS = 50 // cutoff at 50 fields struct + _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions + _MAX_FIELDS = 50 // cutoff at 50 fields struct ) -var _OpNames = [256]string { - _OP_null : "null", - _OP_empty_arr : "empty_arr", - _OP_empty_obj : "empty_obj", - _OP_bool : "bool", - _OP_i8 : "i8", - _OP_i16 : "i16", - _OP_i32 : "i32", - _OP_i64 : "i64", - _OP_u8 : "u8", - _OP_u16 : "u16", - _OP_u32 : "u32", - _OP_u64 : "u64", - _OP_f32 : "f32", - _OP_f64 : "f64", - _OP_str : "str", - _OP_bin : "bin", - _OP_quote : "quote", - _OP_number : "number", - _OP_eface : "eface", - _OP_iface : "iface", - _OP_byte : "byte", - _OP_text : "text", - _OP_deref : "deref", - _OP_index : "index", - _OP_load : "load", - _OP_save : "save", - _OP_drop : "drop", - _OP_drop_2 : "drop_2", - _OP_recurse : "recurse", - _OP_is_nil : "is_nil", - _OP_is_nil_p1 : "is_nil_p1", - _OP_is_zero_1 : "is_zero_1", - _OP_is_zero_2 : "is_zero_2", - _OP_is_zero_4 : "is_zero_4", - _OP_is_zero_8 : "is_zero_8", - _OP_is_zero_map : "is_zero_map", - _OP_goto : "goto", - _OP_map_iter : "map_iter", - _OP_map_stop : "map_stop", - _OP_map_check_key : "map_check_key", - _OP_map_write_key : "map_write_key", - _OP_map_value_next : "map_value_next", - _OP_slice_len : "slice_len", - _OP_slice_next : "slice_next", - _OP_marshal : "marshal", - _OP_marshal_p : "marshal_p", - _OP_marshal_text : "marshal_text", - _OP_marshal_text_p : "marshal_text_p", - _OP_cond_set : "cond_set", - _OP_cond_testc : "cond_testc", +var _OpNames = [256]string{ + _OP_null: "null", + _OP_empty_arr: "empty_arr", + _OP_empty_obj: "empty_obj", + _OP_bool: "bool", + _OP_i8: "i8", + _OP_i16: "i16", + _OP_i32: "i32", + _OP_i64: "i64", + _OP_u8: "u8", + _OP_u16: "u16", + _OP_u32: "u32", + _OP_u64: "u64", + _OP_f32: "f32", + _OP_f64: "f64", + _OP_str: "str", + _OP_bin: "bin", + _OP_quote: "quote", + _OP_number: "number", + _OP_eface: "eface", + _OP_iface: "iface", + _OP_byte: "byte", + _OP_text: "text", + _OP_deref: "deref", + _OP_index: "index", + _OP_load: "load", + _OP_save: "save", + _OP_drop: "drop", + _OP_drop_2: "drop_2", + _OP_recurse: "recurse", + _OP_is_nil: "is_nil", + _OP_is_nil_p1: "is_nil_p1", + _OP_is_zero_1: "is_zero_1", + _OP_is_zero_2: "is_zero_2", + _OP_is_zero_4: "is_zero_4", + _OP_is_zero_8: "is_zero_8", + _OP_is_zero_map: "is_zero_map", + _OP_goto: "goto", + _OP_map_iter: "map_iter", + _OP_map_stop: "map_stop", + _OP_map_check_key: "map_check_key", + _OP_map_write_key: "map_write_key", + _OP_map_value_next: "map_value_next", + _OP_slice_len: "slice_len", + _OP_slice_next: "slice_next", + _OP_marshal: "marshal", + _OP_marshal_p: "marshal_p", + _OP_marshal_text: "marshal_text", + _OP_marshal_text_p: "marshal_text_p", + _OP_cond_set: "cond_set", + _OP_cond_testc: "cond_testc", } func (self _Op) String() string { - if ret := _OpNames[self]; ret != "" { - return ret - } else { - return "<invalid>" - } + if ret := _OpNames[self]; ret != "" { + return ret + } else { + return "<invalid>" + } } func _OP_int() _Op { - switch _INT_SIZE { - case 32: return _OP_i32 - case 64: return _OP_i64 - default: panic("unsupported int size") - } + switch _INT_SIZE { + case 32: + return _OP_i32 
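Aside on the width dispatch above and continuing below: _INT_SIZE and _PTR_SIZE use the constant expression 32 << (^uint(0) >> 63), and _OP_int/_OP_uint/_OP_uintptr switch on the result to pick the 32- or 64-bit opcode. A minimal standalone check of the idiom, independent of sonic:

package main

import "fmt"

// ^uint(0) is all ones; shifting right by 63 yields 1 only when uint is
// 64 bits wide, so the constant is 32<<1 = 64 there and 32<<0 = 32 on
// 32-bit targets. This is the same trick used by _INT_SIZE/_PTR_SIZE.
const intSize = 32 << (^uint(0) >> 63)

func main() {
	fmt.Println("int size in bits:", intSize) // 64 on amd64/arm64, 32 on 386/arm
}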
+ case 64: + return _OP_i64 + default: + panic("unsupported int size") + } } func _OP_uint() _Op { - switch _INT_SIZE { - case 32: return _OP_u32 - case 64: return _OP_u64 - default: panic("unsupported uint size") - } + switch _INT_SIZE { + case 32: + return _OP_u32 + case 64: + return _OP_u64 + default: + panic("unsupported uint size") + } } func _OP_uintptr() _Op { - switch _PTR_SIZE { - case 32: return _OP_u32 - case 64: return _OP_u64 - default: panic("unsupported pointer size") - } + switch _PTR_SIZE { + case 32: + return _OP_u32 + case 64: + return _OP_u64 + default: + panic("unsupported pointer size") + } } func _OP_is_zero_ints() _Op { - switch _INT_SIZE { - case 32: return _OP_is_zero_4 - case 64: return _OP_is_zero_8 - default: panic("unsupported integer size") - } + switch _INT_SIZE { + case 32: + return _OP_is_zero_4 + case 64: + return _OP_is_zero_8 + default: + panic("unsupported integer size") + } } type _Instr struct { - u uint64 // union {op: 8, _: 8, vi: 48}, vi maybe int or len(str) - p unsafe.Pointer // maybe GoString.Ptr, or *GoType + u uint64 // union {op: 8, _: 8, vi: 48}, vi maybe int or len(str) + p unsafe.Pointer // maybe GoString.Ptr, or *GoType } func packOp(op _Op) uint64 { - return uint64(op) << 56 + return uint64(op) << 56 } func newInsOp(op _Op) _Instr { - return _Instr{u: packOp(op)} + return _Instr{u: packOp(op)} } func newInsVi(op _Op, vi int) _Instr { - return _Instr{u: packOp(op) | rt.PackInt(vi)} + return _Instr{u: packOp(op) | rt.PackInt(vi)} } func newInsVs(op _Op, vs string) _Instr { - return _Instr { - u: packOp(op) | rt.PackInt(len(vs)), - p: (*rt.GoString)(unsafe.Pointer(&vs)).Ptr, - } + return _Instr{ + u: packOp(op) | rt.PackInt(len(vs)), + p: (*rt.GoString)(unsafe.Pointer(&vs)).Ptr, + } } func newInsVt(op _Op, vt reflect.Type) _Instr { - return _Instr { - u: packOp(op), - p: unsafe.Pointer(rt.UnpackType(vt)), - } + return _Instr{ + u: packOp(op), + p: unsafe.Pointer(rt.UnpackType(vt)), + } } func newInsVp(op _Op, vt reflect.Type, pv bool) _Instr { - i := 0 - if pv { - i = 1 - } - return _Instr { - u: packOp(op) | rt.PackInt(i), - p: unsafe.Pointer(rt.UnpackType(vt)), - } + i := 0 + if pv { + i = 1 + } + return _Instr{ + u: packOp(op) | rt.PackInt(i), + p: unsafe.Pointer(rt.UnpackType(vt)), + } } func (self _Instr) op() _Op { - return _Op(self.u >> 56) + return _Op(self.u >> 56) } func (self _Instr) vi() int { - return rt.UnpackInt(self.u) + return rt.UnpackInt(self.u) } func (self _Instr) vf() uint8 { - return (*rt.GoType)(self.p).KindFlags + return (*rt.GoType)(self.p).KindFlags } func (self _Instr) vs() (v string) { - (*rt.GoString)(unsafe.Pointer(&v)).Ptr = self.p - (*rt.GoString)(unsafe.Pointer(&v)).Len = self.vi() - return + (*rt.GoString)(unsafe.Pointer(&v)).Ptr = self.p + (*rt.GoString)(unsafe.Pointer(&v)).Len = self.vi() + return } func (self _Instr) vk() reflect.Kind { - return (*rt.GoType)(self.p).Kind() + return (*rt.GoType)(self.p).Kind() } func (self _Instr) vt() reflect.Type { - return (*rt.GoType)(self.p).Pack() + return (*rt.GoType)(self.p).Pack() } func (self _Instr) vp() (vt reflect.Type, pv bool) { - return (*rt.GoType)(self.p).Pack(), rt.UnpackInt(self.u) == 1 + return (*rt.GoType)(self.p).Pack(), rt.UnpackInt(self.u) == 1 } func (self _Instr) i64() int64 { - return int64(self.vi()) + return int64(self.vi()) } func (self _Instr) vlen() int { - return int((*rt.GoType)(self.p).Size) + return int((*rt.GoType)(self.p).Size) } func (self _Instr) isBranch() bool { - switch self.op() { - case _OP_goto : fallthrough - case 
_OP_is_nil : fallthrough - case _OP_is_nil_p1 : fallthrough - case _OP_is_zero_1 : fallthrough - case _OP_is_zero_2 : fallthrough - case _OP_is_zero_4 : fallthrough - case _OP_is_zero_8 : fallthrough - case _OP_map_check_key : fallthrough - case _OP_map_write_key : fallthrough - case _OP_slice_next : fallthrough - case _OP_cond_testc : return true - default : return false - } + switch self.op() { + case _OP_goto: + fallthrough + case _OP_is_nil: + fallthrough + case _OP_is_nil_p1: + fallthrough + case _OP_is_zero_1: + fallthrough + case _OP_is_zero_2: + fallthrough + case _OP_is_zero_4: + fallthrough + case _OP_is_zero_8: + fallthrough + case _OP_map_check_key: + fallthrough + case _OP_map_write_key: + fallthrough + case _OP_slice_next: + fallthrough + case _OP_cond_testc: + return true + default: + return false + } } func (self _Instr) disassemble() string { - switch self.op() { - case _OP_byte : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.QuoteRune(rune(self.vi()))) - case _OP_text : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.Quote(self.vs())) - case _OP_index : return fmt.Sprintf("%-18s%d", self.op().String(), self.vi()) - case _OP_recurse : fallthrough - case _OP_map_iter : fallthrough - case _OP_marshal : fallthrough - case _OP_marshal_p : fallthrough - case _OP_marshal_text : fallthrough - case _OP_marshal_text_p : return fmt.Sprintf("%-18s%s", self.op().String(), self.vt()) - case _OP_goto : fallthrough - case _OP_is_nil : fallthrough - case _OP_is_nil_p1 : fallthrough - case _OP_is_zero_1 : fallthrough - case _OP_is_zero_2 : fallthrough - case _OP_is_zero_4 : fallthrough - case _OP_is_zero_8 : fallthrough - case _OP_is_zero_map : fallthrough - case _OP_cond_testc : fallthrough - case _OP_map_check_key : fallthrough - case _OP_map_write_key : return fmt.Sprintf("%-18sL_%d", self.op().String(), self.vi()) - case _OP_slice_next : return fmt.Sprintf("%-18sL_%d, %s", self.op().String(), self.vi(), self.vt()) - default : return self.op().String() - } + switch self.op() { + case _OP_byte: + return fmt.Sprintf("%-18s%s", self.op().String(), strconv.QuoteRune(rune(self.vi()))) + case _OP_text: + return fmt.Sprintf("%-18s%s", self.op().String(), strconv.Quote(self.vs())) + case _OP_index: + return fmt.Sprintf("%-18s%d", self.op().String(), self.vi()) + case _OP_recurse: + fallthrough + case _OP_map_iter: + fallthrough + case _OP_marshal: + fallthrough + case _OP_marshal_p: + fallthrough + case _OP_marshal_text: + fallthrough + case _OP_marshal_text_p: + return fmt.Sprintf("%-18s%s", self.op().String(), self.vt()) + case _OP_goto: + fallthrough + case _OP_is_nil: + fallthrough + case _OP_is_nil_p1: + fallthrough + case _OP_is_zero_1: + fallthrough + case _OP_is_zero_2: + fallthrough + case _OP_is_zero_4: + fallthrough + case _OP_is_zero_8: + fallthrough + case _OP_is_zero_map: + fallthrough + case _OP_cond_testc: + fallthrough + case _OP_map_check_key: + fallthrough + case _OP_map_write_key: + return fmt.Sprintf("%-18sL_%d", self.op().String(), self.vi()) + case _OP_slice_next: + return fmt.Sprintf("%-18sL_%d, %s", self.op().String(), self.vi(), self.vt()) + default: + return self.op().String() + } } type ( @@ -316,570 +362,644 @@ type ( ) func (self _Program) pc() int { - return len(self) + return len(self) } func (self _Program) tag(n int) { - if n >= _MaxStack { - panic("type nesting too deep") - } + if n >= _MaxStack { + panic("type nesting too deep") + } } func (self _Program) pin(i int) { - v := &self[i] - v.u &= 0xffff000000000000 - v.u |= 
rt.PackInt(self.pc()) + v := &self[i] + v.u &= 0xffff000000000000 + v.u |= rt.PackInt(self.pc()) } func (self _Program) rel(v []int) { - for _, i := range v { - self.pin(i) - } + for _, i := range v { + self.pin(i) + } } func (self *_Program) add(op _Op) { - *self = append(*self, newInsOp(op)) + *self = append(*self, newInsOp(op)) } func (self *_Program) key(op _Op) { - *self = append(*self, - newInsVi(_OP_byte, '"'), - newInsOp(op), - newInsVi(_OP_byte, '"'), - ) + *self = append(*self, + newInsVi(_OP_byte, '"'), + newInsOp(op), + newInsVi(_OP_byte, '"'), + ) } func (self *_Program) int(op _Op, vi int) { - *self = append(*self, newInsVi(op, vi)) + *self = append(*self, newInsVi(op, vi)) } func (self *_Program) str(op _Op, vs string) { - *self = append(*self, newInsVs(op, vs)) + *self = append(*self, newInsVs(op, vs)) } func (self *_Program) rtt(op _Op, vt reflect.Type) { - *self = append(*self, newInsVt(op, vt)) + *self = append(*self, newInsVt(op, vt)) } func (self *_Program) vp(op _Op, vt reflect.Type, pv bool) { - *self = append(*self, newInsVp(op, vt, pv)) + *self = append(*self, newInsVp(op, vt, pv)) } func (self _Program) disassemble() string { - nb := len(self) - tab := make([]bool, nb + 1) - ret := make([]string, 0, nb + 1) - - /* prescan to get all the labels */ - for _, ins := range self { - if ins.isBranch() { - tab[ins.vi()] = true - } - } - - /* disassemble each instruction */ - for i, ins := range self { - if !tab[i] { - ret = append(ret, "\t" + ins.disassemble()) - } else { - ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) - } - } - - /* add the last label, if needed */ - if tab[nb] { - ret = append(ret, fmt.Sprintf("L_%d:", nb)) - } - - /* add an "end" indicator, and join all the strings */ - return strings.Join(append(ret, "\tend"), "\n") + nb := len(self) + tab := make([]bool, nb+1) + ret := make([]string, 0, nb+1) + + /* prescan to get all the labels */ + for _, ins := range self { + if ins.isBranch() { + tab[ins.vi()] = true + } + } + + /* disassemble each instruction */ + for i, ins := range self { + if !tab[i] { + ret = append(ret, "\t"+ins.disassemble()) + } else { + ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) + } + } + + /* add the last label, if needed */ + if tab[nb] { + ret = append(ret, fmt.Sprintf("L_%d:", nb)) + } + + /* add an "end" indicator, and join all the strings */ + return strings.Join(append(ret, "\tend"), "\n") } type _Compiler struct { - opts option.CompileOptions - pv bool - tab map[reflect.Type]bool - rec map[reflect.Type]uint8 + opts option.CompileOptions + pv bool + tab map[reflect.Type]bool + rec map[reflect.Type]uint8 } func newCompiler() *_Compiler { - return &_Compiler { - opts: option.DefaultCompileOptions(), - tab: map[reflect.Type]bool{}, - rec: map[reflect.Type]uint8{}, - } + return &_Compiler{ + opts: option.DefaultCompileOptions(), + tab: map[reflect.Type]bool{}, + rec: map[reflect.Type]uint8{}, + } } func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler { - self.opts = opts - if self.opts.RecursiveDepth > 0 { - self.rec = map[reflect.Type]uint8{} - } - return self + self.opts = opts + if self.opts.RecursiveDepth > 0 { + self.rec = map[reflect.Type]uint8{} + } + return self } func (self *_Compiler) rescue(ep *error) { - if val := recover(); val != nil { - if err, ok := val.(error); ok { - *ep = err - } else { - panic(val) - } - } + if val := recover(); val != nil { + if err, ok := val.(error); ok { + *ep = err + } else { + panic(val) + } + } } func (self *_Compiler) compile(vt 
reflect.Type, pv bool) (ret _Program, err error) { - defer self.rescue(&err) - self.compileOne(&ret, 0, vt, pv) - return + defer self.rescue(&err) + self.compileOne(&ret, 0, vt, pv) + return } func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type, pv bool) { - if self.tab[vt] { - p.vp(_OP_recurse, vt, pv) - } else { - self.compileRec(p, sp, vt, pv) - } + if self.tab[vt] { + p.vp(_OP_recurse, vt, pv) + } else { + self.compileRec(p, sp, vt, pv) + } } func (self *_Compiler) compileRec(p *_Program, sp int, vt reflect.Type, pv bool) { - pr := self.pv - pt := reflect.PtrTo(vt) - - /* check for addressable `json.Marshaler` with pointer receiver */ - if pv && pt.Implements(jsonMarshalerType) { - p.rtt(_OP_marshal_p, pt) - return - } - - /* check for `json.Marshaler` */ - if vt.Implements(jsonMarshalerType) { - self.compileMarshaler(p, _OP_marshal, vt, jsonMarshalerType) - return - } - - /* check for addressable `encoding.TextMarshaler` with pointer receiver */ - if pv && pt.Implements(encodingTextMarshalerType) { - p.rtt(_OP_marshal_text_p, pt) - return - } - - /* check for `encoding.TextMarshaler` */ - if vt.Implements(encodingTextMarshalerType) { - self.compileMarshaler(p, _OP_marshal_text, vt, encodingTextMarshalerType) - return - } - - /* enter the recursion, and compile the type */ - self.pv = pv - self.tab[vt] = true - self.compileOps(p, sp, vt) - - /* exit the recursion */ - self.pv = pr - delete(self.tab, vt) + pr := self.pv + pt := reflect.PtrTo(vt) + + /* check for addressable `json.Marshaler` with pointer receiver */ + if pv && pt.Implements(jsonMarshalerType) { + p.rtt(_OP_marshal_p, pt) + return + } + + /* check for `json.Marshaler` */ + if vt.Implements(jsonMarshalerType) { + self.compileMarshaler(p, _OP_marshal, vt, jsonMarshalerType) + return + } + + /* check for addressable `encoding.TextMarshaler` with pointer receiver */ + if pv && pt.Implements(encodingTextMarshalerType) { + p.rtt(_OP_marshal_text_p, pt) + return + } + + /* check for `encoding.TextMarshaler` */ + if vt.Implements(encodingTextMarshalerType) { + self.compileMarshaler(p, _OP_marshal_text, vt, encodingTextMarshalerType) + return + } + + /* enter the recursion, and compile the type */ + self.pv = pv + self.tab[vt] = true + self.compileOps(p, sp, vt) + + /* exit the recursion */ + self.pv = pr + delete(self.tab, vt) } func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) { - switch vt.Kind() { - case reflect.Bool : p.add(_OP_bool) - case reflect.Int : p.add(_OP_int()) - case reflect.Int8 : p.add(_OP_i8) - case reflect.Int16 : p.add(_OP_i16) - case reflect.Int32 : p.add(_OP_i32) - case reflect.Int64 : p.add(_OP_i64) - case reflect.Uint : p.add(_OP_uint()) - case reflect.Uint8 : p.add(_OP_u8) - case reflect.Uint16 : p.add(_OP_u16) - case reflect.Uint32 : p.add(_OP_u32) - case reflect.Uint64 : p.add(_OP_u64) - case reflect.Uintptr : p.add(_OP_uintptr()) - case reflect.Float32 : p.add(_OP_f32) - case reflect.Float64 : p.add(_OP_f64) - case reflect.String : self.compileString (p, vt) - case reflect.Array : self.compileArray (p, sp, vt.Elem(), vt.Len()) - case reflect.Interface : self.compileInterface (p, vt) - case reflect.Map : self.compileMap (p, sp, vt) - case reflect.Ptr : self.compilePtr (p, sp, vt.Elem()) - case reflect.Slice : self.compileSlice (p, sp, vt.Elem()) - case reflect.Struct : self.compileStruct (p, sp, vt) - default : panic (error_type(vt)) - } + switch vt.Kind() { + case reflect.Bool: + p.add(_OP_bool) + case reflect.Int: + p.add(_OP_int()) + case reflect.Int8: + 
p.add(_OP_i8) + case reflect.Int16: + p.add(_OP_i16) + case reflect.Int32: + p.add(_OP_i32) + case reflect.Int64: + p.add(_OP_i64) + case reflect.Uint: + p.add(_OP_uint()) + case reflect.Uint8: + p.add(_OP_u8) + case reflect.Uint16: + p.add(_OP_u16) + case reflect.Uint32: + p.add(_OP_u32) + case reflect.Uint64: + p.add(_OP_u64) + case reflect.Uintptr: + p.add(_OP_uintptr()) + case reflect.Float32: + p.add(_OP_f32) + case reflect.Float64: + p.add(_OP_f64) + case reflect.String: + self.compileString(p, vt) + case reflect.Array: + self.compileArray(p, sp, vt.Elem(), vt.Len()) + case reflect.Interface: + self.compileInterface(p, vt) + case reflect.Map: + self.compileMap(p, sp, vt) + case reflect.Ptr: + self.compilePtr(p, sp, vt.Elem()) + case reflect.Slice: + self.compileSlice(p, sp, vt.Elem()) + case reflect.Struct: + self.compileStruct(p, sp, vt) + default: + panic(error_type(vt)) + } } func (self *_Compiler) compileNil(p *_Program, sp int, vt reflect.Type, nil_op _Op, fn func(*_Program, int, reflect.Type)) { - x := p.pc() - p.add(_OP_is_nil) - fn(p, sp, vt) - e := p.pc() - p.add(_OP_goto) - p.pin(x) - p.add(nil_op) - p.pin(e) + x := p.pc() + p.add(_OP_is_nil) + fn(p, sp, vt) + e := p.pc() + p.add(_OP_goto) + p.pin(x) + p.add(nil_op) + p.pin(e) } func (self *_Compiler) compilePtr(p *_Program, sp int, vt reflect.Type) { - self.compileNil(p, sp, vt, _OP_null, self.compilePtrBody) + self.compileNil(p, sp, vt, _OP_null, self.compilePtrBody) } func (self *_Compiler) compilePtrBody(p *_Program, sp int, vt reflect.Type) { - p.tag(sp) - p.add(_OP_save) - p.add(_OP_deref) - self.compileOne(p, sp + 1, vt, true) - p.add(_OP_drop) + p.tag(sp) + p.add(_OP_save) + p.add(_OP_deref) + self.compileOne(p, sp+1, vt, true) + p.add(_OP_drop) } func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) { - self.compileNil(p, sp, vt, _OP_empty_obj, self.compileMapBody) + self.compileNil(p, sp, vt, _OP_empty_obj, self.compileMapBody) } func (self *_Compiler) compileMapBody(p *_Program, sp int, vt reflect.Type) { - p.tag(sp + 1) - p.int(_OP_byte, '{') - p.add(_OP_save) - p.rtt(_OP_map_iter, vt) - p.add(_OP_save) - i := p.pc() - p.add(_OP_map_check_key) - u := p.pc() - p.add(_OP_map_write_key) - self.compileMapBodyKey(p, vt.Key()) - p.pin(u) - p.int(_OP_byte, ':') - p.add(_OP_map_value_next) - self.compileOne(p, sp + 2, vt.Elem(), false) - j := p.pc() - p.add(_OP_map_check_key) - p.int(_OP_byte, ',') - v := p.pc() - p.add(_OP_map_write_key) - self.compileMapBodyKey(p, vt.Key()) - p.pin(v) - p.int(_OP_byte, ':') - p.add(_OP_map_value_next) - self.compileOne(p, sp + 2, vt.Elem(), false) - p.int(_OP_goto, j) - p.pin(i) - p.pin(j) - p.add(_OP_map_stop) - p.add(_OP_drop_2) - p.int(_OP_byte, '}') + p.tag(sp + 1) + p.int(_OP_byte, '{') + p.add(_OP_save) + p.rtt(_OP_map_iter, vt) + p.add(_OP_save) + i := p.pc() + p.add(_OP_map_check_key) + u := p.pc() + p.add(_OP_map_write_key) + self.compileMapBodyKey(p, vt.Key()) + p.pin(u) + p.int(_OP_byte, ':') + p.add(_OP_map_value_next) + self.compileOne(p, sp+2, vt.Elem(), false) + j := p.pc() + p.add(_OP_map_check_key) + p.int(_OP_byte, ',') + v := p.pc() + p.add(_OP_map_write_key) + self.compileMapBodyKey(p, vt.Key()) + p.pin(v) + p.int(_OP_byte, ':') + p.add(_OP_map_value_next) + self.compileOne(p, sp+2, vt.Elem(), false) + p.int(_OP_goto, j) + p.pin(i) + p.pin(j) + p.add(_OP_map_stop) + p.add(_OP_drop_2) + p.int(_OP_byte, '}') } func (self *_Compiler) compileMapBodyKey(p *_Program, vk reflect.Type) { - if !vk.Implements(encodingTextMarshalerType) { - 
self.compileMapBodyTextKey(p, vk) - } else { - self.compileMapBodyUtextKey(p, vk) - } + if !vk.Implements(encodingTextMarshalerType) { + self.compileMapBodyTextKey(p, vk) + } else { + self.compileMapBodyUtextKey(p, vk) + } } func (self *_Compiler) compileMapBodyTextKey(p *_Program, vk reflect.Type) { - switch vk.Kind() { - case reflect.Invalid : panic("map key is nil") - case reflect.Bool : p.key(_OP_bool) - case reflect.Int : p.key(_OP_int()) - case reflect.Int8 : p.key(_OP_i8) - case reflect.Int16 : p.key(_OP_i16) - case reflect.Int32 : p.key(_OP_i32) - case reflect.Int64 : p.key(_OP_i64) - case reflect.Uint : p.key(_OP_uint()) - case reflect.Uint8 : p.key(_OP_u8) - case reflect.Uint16 : p.key(_OP_u16) - case reflect.Uint32 : p.key(_OP_u32) - case reflect.Uint64 : p.key(_OP_u64) - case reflect.Uintptr : p.key(_OP_uintptr()) - case reflect.Float32 : p.key(_OP_f32) - case reflect.Float64 : p.key(_OP_f64) - case reflect.String : self.compileString(p, vk) - default : panic(error_type(vk)) - } + switch vk.Kind() { + case reflect.Invalid: + panic("map key is nil") + case reflect.Bool: + p.key(_OP_bool) + case reflect.Int: + p.key(_OP_int()) + case reflect.Int8: + p.key(_OP_i8) + case reflect.Int16: + p.key(_OP_i16) + case reflect.Int32: + p.key(_OP_i32) + case reflect.Int64: + p.key(_OP_i64) + case reflect.Uint: + p.key(_OP_uint()) + case reflect.Uint8: + p.key(_OP_u8) + case reflect.Uint16: + p.key(_OP_u16) + case reflect.Uint32: + p.key(_OP_u32) + case reflect.Uint64: + p.key(_OP_u64) + case reflect.Uintptr: + p.key(_OP_uintptr()) + case reflect.Float32: + p.key(_OP_f32) + case reflect.Float64: + p.key(_OP_f64) + case reflect.String: + self.compileString(p, vk) + default: + panic(error_type(vk)) + } } func (self *_Compiler) compileMapBodyUtextKey(p *_Program, vk reflect.Type) { - if vk.Kind() != reflect.Ptr { - p.rtt(_OP_marshal_text, vk) - } else { - self.compileMapBodyUtextPtr(p, vk) - } + if vk.Kind() != reflect.Ptr { + p.rtt(_OP_marshal_text, vk) + } else { + self.compileMapBodyUtextPtr(p, vk) + } } func (self *_Compiler) compileMapBodyUtextPtr(p *_Program, vk reflect.Type) { - i := p.pc() - p.add(_OP_is_nil) - p.rtt(_OP_marshal_text, vk) - j := p.pc() - p.add(_OP_goto) - p.pin(i) - p.str(_OP_text, "\"\"") - p.pin(j) + i := p.pc() + p.add(_OP_is_nil) + p.rtt(_OP_marshal_text, vk) + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.str(_OP_text, "\"\"") + p.pin(j) } func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) { - self.compileNil(p, sp, vt, _OP_empty_arr, self.compileSliceBody) + self.compileNil(p, sp, vt, _OP_empty_arr, self.compileSliceBody) } func (self *_Compiler) compileSliceBody(p *_Program, sp int, vt reflect.Type) { - if isSimpleByte(vt) { - p.add(_OP_bin) - } else { - self.compileSliceArray(p, sp, vt) - } + if isSimpleByte(vt) { + p.add(_OP_bin) + } else { + self.compileSliceArray(p, sp, vt) + } } func (self *_Compiler) compileSliceArray(p *_Program, sp int, vt reflect.Type) { - p.tag(sp) - p.int(_OP_byte, '[') - p.add(_OP_save) - p.add(_OP_slice_len) - i := p.pc() - p.rtt(_OP_slice_next, vt) - self.compileOne(p, sp + 1, vt, true) - j := p.pc() - p.rtt(_OP_slice_next, vt) - p.int(_OP_byte, ',') - self.compileOne(p, sp + 1, vt, true) - p.int(_OP_goto, j) - p.pin(i) - p.pin(j) - p.add(_OP_drop) - p.int(_OP_byte, ']') + p.tag(sp) + p.int(_OP_byte, '[') + p.add(_OP_save) + p.add(_OP_slice_len) + i := p.pc() + p.rtt(_OP_slice_next, vt) + self.compileOne(p, sp+1, vt, true) + j := p.pc() + p.rtt(_OP_slice_next, vt) + p.int(_OP_byte, ',') + self.compileOne(p, 
sp+1, vt, true) + p.int(_OP_goto, j) + p.pin(i) + p.pin(j) + p.add(_OP_drop) + p.int(_OP_byte, ']') } func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type, nb int) { - p.tag(sp) - p.int(_OP_byte, '[') - p.add(_OP_save) + p.tag(sp) + p.int(_OP_byte, '[') + p.add(_OP_save) - /* first item */ - if nb != 0 { - self.compileOne(p, sp + 1, vt, self.pv) - p.add(_OP_load) - } + /* first item */ + if nb != 0 { + self.compileOne(p, sp+1, vt, self.pv) + p.add(_OP_load) + } - /* remaining items */ - for i := 1; i < nb; i++ { - p.int(_OP_byte, ',') - p.int(_OP_index, i * int(vt.Size())) - self.compileOne(p, sp + 1, vt, self.pv) - p.add(_OP_load) - } + /* remaining items */ + for i := 1; i < nb; i++ { + p.int(_OP_byte, ',') + p.int(_OP_index, i*int(vt.Size())) + self.compileOne(p, sp+1, vt, self.pv) + p.add(_OP_load) + } - /* end of array */ - p.add(_OP_drop) - p.int(_OP_byte, ']') + /* end of array */ + p.add(_OP_drop) + p.int(_OP_byte, ']') } func (self *_Compiler) compileString(p *_Program, vt reflect.Type) { - if vt != jsonNumberType { - p.add(_OP_str) - } else { - p.add(_OP_number) - } + if vt != jsonNumberType { + p.add(_OP_str) + } else { + p.add(_OP_number) + } } func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) { - if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { - p.vp(_OP_recurse, vt, self.pv) - if self.opts.RecursiveDepth > 0 { - if self.pv { - self.rec[vt] = 1 - } else { - self.rec[vt] = 0 - } - } - } else { - self.compileStructBody(p, sp, vt) - } + if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { + p.vp(_OP_recurse, vt, self.pv) + if self.opts.RecursiveDepth > 0 { + if self.pv { + self.rec[vt] = 1 + } else { + self.rec[vt] = 0 + } + } + } else { + self.compileStructBody(p, sp, vt) + } } func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) { - p.tag(sp) - p.int(_OP_byte, '{') - p.add(_OP_save) - p.add(_OP_cond_set) - - /* compile each field */ - for _, fv := range resolver.ResolveStruct(vt) { - var s []int - var o resolver.Offset - - /* "omitempty" for arrays */ - if fv.Type.Kind() == reflect.Array { - if fv.Type.Len() == 0 && (fv.Opts & resolver.F_omitempty) != 0 { - continue - } - } - - /* index to the field */ - for _, o = range fv.Path { - if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { - s = append(s, p.pc()) - p.add(_OP_is_nil) - p.add(_OP_deref) - } - } - - /* check for "omitempty" option */ - if fv.Type.Kind() != reflect.Struct && fv.Type.Kind() != reflect.Array && (fv.Opts & resolver.F_omitempty) != 0 { - s = append(s, p.pc()) - self.compileStructFieldZero(p, fv.Type) - } - - /* add the comma if not the first element */ - i := p.pc() - p.add(_OP_cond_testc) - p.int(_OP_byte, ',') - p.pin(i) - - /* compile the key and value */ - ft := fv.Type - p.str(_OP_text, Quote(fv.Name) + ":") - - /* check for "stringnize" option */ - if (fv.Opts & resolver.F_stringize) == 0 { - self.compileOne(p, sp + 1, ft, self.pv) - } else { - self.compileStructFieldStr(p, sp + 1, ft) - } - - /* patch the skipping jumps and reload the struct pointer */ - p.rel(s) - p.add(_OP_load) - } - - /* end of object */ - p.add(_OP_drop) - p.int(_OP_byte, '}') + p.tag(sp) + p.int(_OP_byte, '{') + p.add(_OP_save) + p.add(_OP_cond_set) + + /* compile each field */ + for _, fv := range resolver.ResolveStruct(vt) { + var s []int + var o resolver.Offset + + /* "omitempty" for arrays */ + if fv.Type.Kind() == reflect.Array { + if 
fv.Type.Len() == 0 && (fv.Opts&resolver.F_omitempty) != 0 { + continue + } + } + + /* index to the field */ + for _, o = range fv.Path { + if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { + s = append(s, p.pc()) + p.add(_OP_is_nil) + p.add(_OP_deref) + } + } + + /* check for "omitempty" option */ + if fv.Type.Kind() != reflect.Struct && fv.Type.Kind() != reflect.Array && (fv.Opts&resolver.F_omitempty) != 0 { + s = append(s, p.pc()) + self.compileStructFieldZero(p, fv.Type) + } + + /* add the comma if not the first element */ + i := p.pc() + p.add(_OP_cond_testc) + p.int(_OP_byte, ',') + p.pin(i) + + /* compile the key and value */ + ft := fv.Type + p.str(_OP_text, Quote(fv.Name)+":") + + /* check for "stringnize" option */ + if (fv.Opts & resolver.F_stringize) == 0 { + self.compileOne(p, sp+1, ft, self.pv) + } else { + self.compileStructFieldStr(p, sp+1, ft) + } + + /* patch the skipping jumps and reload the struct pointer */ + p.rel(s) + p.add(_OP_load) + } + + /* end of object */ + p.add(_OP_drop) + p.int(_OP_byte, '}') } func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) { - pc := -1 - ft := vt - sv := false - - /* dereference the pointer if needed */ - if ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - - /* check if it can be stringized */ - switch ft.Kind() { - case reflect.Bool : sv = true - case reflect.Int : sv = true - case reflect.Int8 : sv = true - case reflect.Int16 : sv = true - case reflect.Int32 : sv = true - case reflect.Int64 : sv = true - case reflect.Uint : sv = true - case reflect.Uint8 : sv = true - case reflect.Uint16 : sv = true - case reflect.Uint32 : sv = true - case reflect.Uint64 : sv = true - case reflect.Uintptr : sv = true - case reflect.Float32 : sv = true - case reflect.Float64 : sv = true - case reflect.String : sv = true - } - - /* if it's not, ignore the "string" and follow the regular path */ - if !sv { - self.compileOne(p, sp, vt, self.pv) - return - } - - /* dereference the pointer */ - if vt.Kind() == reflect.Ptr { - pc = p.pc() - vt = vt.Elem() - p.add(_OP_is_nil) - p.add(_OP_deref) - } - - /* special case of a double-quoted string */ - if ft != jsonNumberType && ft.Kind() == reflect.String { - p.add(_OP_quote) - } else { - self.compileStructFieldQuoted(p, sp, vt) - } - - /* the "null" case of the pointer */ - if pc != -1 { - e := p.pc() - p.add(_OP_goto) - p.pin(pc) - p.add(_OP_null) - p.pin(e) - } + pc := -1 + ft := vt + sv := false + + /* dereference the pointer if needed */ + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + /* check if it can be stringized */ + switch ft.Kind() { + case reflect.Bool: + sv = true + case reflect.Int: + sv = true + case reflect.Int8: + sv = true + case reflect.Int16: + sv = true + case reflect.Int32: + sv = true + case reflect.Int64: + sv = true + case reflect.Uint: + sv = true + case reflect.Uint8: + sv = true + case reflect.Uint16: + sv = true + case reflect.Uint32: + sv = true + case reflect.Uint64: + sv = true + case reflect.Uintptr: + sv = true + case reflect.Float32: + sv = true + case reflect.Float64: + sv = true + case reflect.String: + sv = true + } + + /* if it's not, ignore the "string" and follow the regular path */ + if !sv { + self.compileOne(p, sp, vt, self.pv) + return + } + + /* dereference the pointer */ + if vt.Kind() == reflect.Ptr { + pc = p.pc() + vt = vt.Elem() + p.add(_OP_is_nil) + p.add(_OP_deref) + } + + /* special case of a double-quoted string */ + if ft != jsonNumberType && ft.Kind() == reflect.String { + p.add(_OP_quote) + } else { + 
self.compileStructFieldQuoted(p, sp, vt) + } + + /* the "null" case of the pointer */ + if pc != -1 { + e := p.pc() + p.add(_OP_goto) + p.pin(pc) + p.add(_OP_null) + p.pin(e) + } } func (self *_Compiler) compileStructFieldZero(p *_Program, vt reflect.Type) { - switch vt.Kind() { - case reflect.Bool : p.add(_OP_is_zero_1) - case reflect.Int : p.add(_OP_is_zero_ints()) - case reflect.Int8 : p.add(_OP_is_zero_1) - case reflect.Int16 : p.add(_OP_is_zero_2) - case reflect.Int32 : p.add(_OP_is_zero_4) - case reflect.Int64 : p.add(_OP_is_zero_8) - case reflect.Uint : p.add(_OP_is_zero_ints()) - case reflect.Uint8 : p.add(_OP_is_zero_1) - case reflect.Uint16 : p.add(_OP_is_zero_2) - case reflect.Uint32 : p.add(_OP_is_zero_4) - case reflect.Uint64 : p.add(_OP_is_zero_8) - case reflect.Uintptr : p.add(_OP_is_nil) - case reflect.Float32 : p.add(_OP_is_zero_4) - case reflect.Float64 : p.add(_OP_is_zero_8) - case reflect.String : p.add(_OP_is_nil_p1) - case reflect.Interface : p.add(_OP_is_nil_p1) - case reflect.Map : p.add(_OP_is_zero_map) - case reflect.Ptr : p.add(_OP_is_nil) - case reflect.Slice : p.add(_OP_is_nil_p1) - default : panic(error_type(vt)) - } + switch vt.Kind() { + case reflect.Bool: + p.add(_OP_is_zero_1) + case reflect.Int: + p.add(_OP_is_zero_ints()) + case reflect.Int8: + p.add(_OP_is_zero_1) + case reflect.Int16: + p.add(_OP_is_zero_2) + case reflect.Int32: + p.add(_OP_is_zero_4) + case reflect.Int64: + p.add(_OP_is_zero_8) + case reflect.Uint: + p.add(_OP_is_zero_ints()) + case reflect.Uint8: + p.add(_OP_is_zero_1) + case reflect.Uint16: + p.add(_OP_is_zero_2) + case reflect.Uint32: + p.add(_OP_is_zero_4) + case reflect.Uint64: + p.add(_OP_is_zero_8) + case reflect.Uintptr: + p.add(_OP_is_nil) + case reflect.Float32: + p.add(_OP_is_zero_4) + case reflect.Float64: + p.add(_OP_is_zero_8) + case reflect.String: + p.add(_OP_is_nil_p1) + case reflect.Interface: + p.add(_OP_is_nil_p1) + case reflect.Map: + p.add(_OP_is_zero_map) + case reflect.Ptr: + p.add(_OP_is_nil) + case reflect.Slice: + p.add(_OP_is_nil_p1) + default: + panic(error_type(vt)) + } } func (self *_Compiler) compileStructFieldQuoted(p *_Program, sp int, vt reflect.Type) { - p.int(_OP_byte, '"') - self.compileOne(p, sp, vt, self.pv) - p.int(_OP_byte, '"') + p.int(_OP_byte, '"') + self.compileOne(p, sp, vt, self.pv) + p.int(_OP_byte, '"') } func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) { - x := p.pc() - p.add(_OP_is_nil_p1) + x := p.pc() + p.add(_OP_is_nil_p1) - /* iface and efaces are different */ - if vt.NumMethod() == 0 { - p.add(_OP_eface) - } else { - p.add(_OP_iface) - } + /* iface and efaces are different */ + if vt.NumMethod() == 0 { + p.add(_OP_eface) + } else { + p.add(_OP_iface) + } - /* the "null" value */ - e := p.pc() - p.add(_OP_goto) - p.pin(x) - p.add(_OP_null) - p.pin(e) + /* the "null" value */ + e := p.pc() + p.add(_OP_goto) + p.pin(x) + p.add(_OP_null) + p.pin(e) } func (self *_Compiler) compileMarshaler(p *_Program, op _Op, vt reflect.Type, mt reflect.Type) { - pc := p.pc() - vk := vt.Kind() - - /* direct receiver */ - if vk != reflect.Ptr { - p.rtt(op, vt) - return - } - - /* value receiver with a pointer type, check for nil before calling the marshaler */ - p.add(_OP_is_nil) - p.rtt(op, vt) - i := p.pc() - p.add(_OP_goto) - p.pin(pc) - p.add(_OP_null) - p.pin(i) + pc := p.pc() + vk := vt.Kind() + + /* direct receiver */ + if vk != reflect.Ptr { + p.rtt(op, vt) + return + } + + /* value receiver with a pointer type, check for nil before calling the marshaler */ + 
p.add(_OP_is_nil) + p.rtt(op, vt) + i := p.pc() + p.add(_OP_goto) + p.pin(pc) + p.add(_OP_null) + p.pin(i) } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/debug_go116.go b/vendor/github.com/bytedance/sonic/internal/encoder/debug_go116.go index 4bc9c15c1..66d117925 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/debug_go116.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/debug_go116.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.17 // +build go1.15,!go1.17 /* @@ -19,48 +20,48 @@ package encoder import ( - `os` - `strings` - `runtime` - `runtime/debug` + "os" + "runtime" + "runtime/debug" + "strings" - `github.com/bytedance/sonic/internal/jit` + "github.com/bytedance/sonic/internal/jit" ) var ( - debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" - debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" ) var ( - _Instr_End _Instr = newInsOp(_OP_null) + _Instr_End _Instr = newInsOp(_OP_null) - _F_gc = jit.Func(runtime.GC) - _F_force_gc = jit.Func(debug.FreeOSMemory) - _F_println = jit.Func(println_wrapper) + _F_gc = jit.Func(runtime.GC) + _F_force_gc = jit.Func(debug.FreeOSMemory) + _F_println = jit.Func(println_wrapper) ) -func println_wrapper(i int, op1 int, op2 int){ - println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +func println_wrapper(i int, op1 int, op2 int) { + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) } func (self *_Assembler) force_gc() { - self.call_go(_F_gc) - self.call_go(_F_force_gc) + self.call_go(_F_gc) + self.call_go(_F_force_gc) } func (self *_Assembler) debug_instr(i int, v *_Instr) { - if debugSyncGC { - if (i+1 == len(self.p)) { - self.print_gc(i, v, &_Instr_End) - } else { - next := &(self.p[i+1]) - self.print_gc(i, v, next) - name := _OpNames[next.op()] - if strings.Contains(name, "save") { - return - } - } - self.force_gc() - } + if debugSyncGC { + if i+1 == len(self.p) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + self.force_gc() + } } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/debug_go117.go b/vendor/github.com/bytedance/sonic/internal/encoder/debug_go117.go index e1016de32..1a5c71a78 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/debug_go117.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/debug_go117.go @@ -1,3 +1,4 @@ +//go:build go1.17 && !go1.21 // +build go1.17,!go1.21 /* @@ -19,96 +20,96 @@ package encoder import ( - `fmt` - `os` - `runtime` - `strings` - `unsafe` - - `github.com/bytedance/sonic/internal/jit` - `github.com/twitchyliquid64/golang-asm/obj` + "fmt" + "os" + "runtime" + "strings" + "unsafe" + + "github.com/bytedance/sonic/internal/jit" + "github.com/twitchyliquid64/golang-asm/obj" ) const _FP_debug = 128 var ( - debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" - debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" - debugCheckPtr = os.Getenv("SONIC_CHECK_POINTER") != "" + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" + debugCheckPtr = os.Getenv("SONIC_CHECK_POINTER") != "" ) var ( - _Instr_End = newInsOp(_OP_is_nil) + _Instr_End = newInsOp(_OP_is_nil) - _F_gc = jit.Func(gc) - _F_println = jit.Func(println_wrapper) - _F_print = jit.Func(print) + _F_gc = jit.Func(gc) + _F_println = jit.Func(println_wrapper) + _F_print = 
jit.Func(print) ) func (self *_Assembler) dsave(r ...obj.Addr) { - for i, v := range r { - if i > _FP_debug / 8 - 1 { - panic("too many registers to save") - } else { - self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8)) - } - } + for i, v := range r { + if i > _FP_debug/8-1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs+_FP_saves+_FP_locals+int64(i)*8)) + } + } } func (self *_Assembler) dload(r ...obj.Addr) { - for i, v := range r { - if i > _FP_debug / 8 - 1 { - panic("too many registers to load") - } else { - self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8), v) - } - } + for i, v := range r { + if i > _FP_debug/8-1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs+_FP_saves+_FP_locals+int64(i)*8), v) + } + } } -func println_wrapper(i int, op1 int, op2 int){ - println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +func println_wrapper(i int, op1 int, op2 int) { + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) } -func print(i int){ - println(i) +func print(i int) { + println(i) } func gc() { - if !debugSyncGC { - return - } - runtime.GC() - // debug.FreeOSMemory() + if !debugSyncGC { + return + } + runtime.GC() + // debug.FreeOSMemory() } func (self *_Assembler) dcall(fn obj.Addr) { - self.Emit("MOVQ", fn, _R10) // MOVQ ${fn}, R10 - self.Rjmp("CALL", _R10) // CALL R10 + self.Emit("MOVQ", fn, _R10) // MOVQ ${fn}, R10 + self.Rjmp("CALL", _R10) // CALL R10 } func (self *_Assembler) debug_gc() { - if !debugSyncGC { - return - } - self.dsave(_REG_debug...) - self.dcall(_F_gc) - self.dload(_REG_debug...) + if !debugSyncGC { + return + } + self.dsave(_REG_debug...) + self.dcall(_F_gc) + self.dload(_REG_debug...) 
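The dsave/dload helpers above spill and reload registers into a scratch area at the end of the JIT frame: slot i lives at offset _FP_fargs + _FP_saves + _FP_locals + i*8, with room for _FP_debug/8 registers. A sketch of that arithmetic; only _FP_debug (128) is visible in this hunk, the other three section sizes are assumed for illustration:

package main

import "fmt"

const (
	fpFargs  = 64  // assumed frame-section size
	fpSaves  = 96  // assumed frame-section size
	fpLocals = 24  // assumed frame-section size
	fpDebug  = 128 // from the hunk: scratch area for spilled registers
)

// debugSlotOffset mirrors dsave/dload: slot i sits past the argument, save,
// and local sections, 8 bytes per register, at most fpDebug/8 slots.
func debugSlotOffset(i int) int64 {
	if i > fpDebug/8-1 {
		panic("too many registers to save")
	}
	return int64(fpFargs + fpSaves + fpLocals + i*8)
}

func main() {
	fmt.Println(debugSlotOffset(0), debugSlotOffset(15)) // 184 304
}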
} func (self *_Assembler) debug_instr(i int, v *_Instr) { - if debugSyncGC { - if i+1 == len(self.p) { - self.print_gc(i, v, &_Instr_End) - } else { - next := &(self.p[i+1]) - self.print_gc(i, v, next) - name := _OpNames[next.op()] - if strings.Contains(name, "save") { - return - } - } - // self.debug_gc() - } + if debugSyncGC { + if i+1 == len(self.p) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + // self.debug_gc() + } } //go:noescape @@ -120,86 +121,86 @@ func checkptrBase(p unsafe.Pointer) uintptr func findObject(p, refBase, refOff uintptr) (base uintptr, s unsafe.Pointer, objIndex uintptr) var ( - _F_checkptr = jit.Func(checkptr) - _F_printptr = jit.Func(printptr) + _F_checkptr = jit.Func(checkptr) + _F_printptr = jit.Func(printptr) ) var ( - _R10 = jit.Reg("R10") + _R10 = jit.Reg("R10") ) -var _REG_debug = []obj.Addr { - jit.Reg("AX"), - jit.Reg("BX"), - jit.Reg("CX"), - jit.Reg("DX"), - jit.Reg("DI"), - jit.Reg("SI"), - jit.Reg("BP"), - jit.Reg("SP"), - jit.Reg("R8"), - jit.Reg("R9"), - jit.Reg("R10"), - jit.Reg("R11"), - jit.Reg("R12"), - jit.Reg("R13"), - jit.Reg("R14"), - jit.Reg("R15"), +var _REG_debug = []obj.Addr{ + jit.Reg("AX"), + jit.Reg("BX"), + jit.Reg("CX"), + jit.Reg("DX"), + jit.Reg("DI"), + jit.Reg("SI"), + jit.Reg("BP"), + jit.Reg("SP"), + jit.Reg("R8"), + jit.Reg("R9"), + jit.Reg("R10"), + jit.Reg("R11"), + jit.Reg("R12"), + jit.Reg("R13"), + jit.Reg("R14"), + jit.Reg("R15"), } func checkptr(ptr uintptr) { - if ptr == 0 { - return - } - fmt.Printf("pointer: %x\n", ptr) - f := checkptrBase(unsafe.Pointer(uintptr(ptr))) - if f == 0 { - fmt.Printf("! unknown-based pointer: %x\n", ptr) - } else if f == 1 { - fmt.Printf("! stack pointer: %x\n", ptr) - } else { - fmt.Printf("base: %x\n", f) - } - findobj(ptr) + if ptr == 0 { + return + } + fmt.Printf("pointer: %x\n", ptr) + f := checkptrBase(unsafe.Pointer(uintptr(ptr))) + if f == 0 { + fmt.Printf("! unknown-based pointer: %x\n", ptr) + } else if f == 1 { + fmt.Printf("! stack pointer: %x\n", ptr) + } else { + fmt.Printf("base: %x\n", f) + } + findobj(ptr) } func findobj(ptr uintptr) { - base, s, objIndex := findObject(ptr, 0, 0) - if s != nil && base == 0 { - fmt.Printf("! invalid pointer: %x\n", ptr) - } - fmt.Printf("objIndex: %d\n", objIndex) + base, s, objIndex := findObject(ptr, 0, 0) + if s != nil && base == 0 { + fmt.Printf("! invalid pointer: %x\n", ptr) + } + fmt.Printf("objIndex: %d\n", objIndex) } func (self *_Assembler) check_ptr(ptr obj.Addr, lea bool) { - if !debugCheckPtr { - return - } - - self.dsave(_REG_debug...) - if lea { - self.Emit("LEAQ", ptr, _R10) - } else { - self.Emit("MOVQ", ptr, _R10) - } - self.Emit("MOVQ", _R10, jit.Ptr(_SP, 0)) - self.dcall(_F_checkptr) - self.dload(_REG_debug...) + if !debugCheckPtr { + return + } + + self.dsave(_REG_debug...) + if lea { + self.Emit("LEAQ", ptr, _R10) + } else { + self.Emit("MOVQ", ptr, _R10) + } + self.Emit("MOVQ", _R10, jit.Ptr(_SP, 0)) + self.dcall(_F_checkptr) + self.dload(_REG_debug...) } func printptr(i int, ptr uintptr) { - fmt.Printf("[%d] ptr: %x\n", i, ptr) + fmt.Printf("[%d] ptr: %x\n", i, ptr) } func (self *_Assembler) print_ptr(i int, ptr obj.Addr, lea bool) { - self.dsave(_REG_debug...) - if lea { - self.Emit("LEAQ", ptr, _R10) - } else { - self.Emit("MOVQ", ptr, _R10) - } - - self.Emit("MOVQ", jit.Imm(int64(i)), _AX) - self.Emit("MOVQ", _R10, _BX) - self.dcall(_F_printptr) - self.dload(_REG_debug...) 
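All of these hooks (debug_instr, debug_gc, check_ptr, print_ptr) are opt-in: each is gated on an environment variable read once at package init, so release builds pay only a boolean test. The variable names below are taken from this hunk; the wiring is an illustrative sketch, not sonic's actual init code:

package main

import (
	"fmt"
	"os"
)

// Each debug path is enabled by setting the variable before the process starts,
// e.g. SONIC_SYNC_GC=1 or SONIC_CHECK_POINTER=1 in the test environment.
var (
	debugSyncGC   = os.Getenv("SONIC_SYNC_GC") != ""
	debugCheckPtr = os.Getenv("SONIC_CHECK_POINTER") != ""
)

func main() {
	fmt.Println("sync GC:", debugSyncGC, "| check pointers:", debugCheckPtr)
}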
-} \ No newline at end of file + self.dsave(_REG_debug...) + if lea { + self.Emit("LEAQ", ptr, _R10) + } else { + self.Emit("MOVQ", ptr, _R10) + } + + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) + self.Emit("MOVQ", _R10, _BX) + self.dcall(_F_printptr) + self.dload(_REG_debug...) +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/encoder.go b/vendor/github.com/bytedance/sonic/internal/encoder/encoder.go index 3c46061a4..d8e0a696e 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/encoder.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/encoder.go @@ -17,219 +17,219 @@ package encoder import ( - `bytes` - `encoding/json` - `reflect` - `runtime` - `unsafe` - - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` - `github.com/bytedance/sonic/utf8` - `github.com/bytedance/sonic/option` + "bytes" + "encoding/json" + "reflect" + "runtime" + "unsafe" + + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" + "github.com/bytedance/sonic/utf8" ) // Options is a set of encoding options. type Options uint64 const ( - bitSortMapKeys = iota - bitEscapeHTML - bitCompactMarshaler - bitNoQuoteTextMarshaler - bitNoNullSliceOrMap - bitValidateString - - // used for recursive compile - bitPointerValue = 63 + bitSortMapKeys = iota + bitEscapeHTML + bitCompactMarshaler + bitNoQuoteTextMarshaler + bitNoNullSliceOrMap + bitValidateString + + // used for recursive compile + bitPointerValue = 63 ) const ( - // SortMapKeys indicates that the keys of a map needs to be sorted - // before serializing into JSON. - // WARNING: This hurts performance A LOT, USE WITH CARE. - SortMapKeys Options = 1 << bitSortMapKeys - - // EscapeHTML indicates encoder to escape all HTML characters - // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). - // WARNING: This hurts performance A LOT, USE WITH CARE. - EscapeHTML Options = 1 << bitEscapeHTML - - // CompactMarshaler indicates that the output JSON from json.Marshaler - // is always compact and needs no validation - CompactMarshaler Options = 1 << bitCompactMarshaler - - // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler - // is always escaped string and needs no quoting - NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler - - // NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}', - // instead of 'null' - NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap - - // ValidateString indicates that encoder should validate the input string - // before encoding it into JSON. - ValidateString Options = 1 << bitValidateString - - // CompatibleWithStd is used to be compatible with std encoder. - CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler + // SortMapKeys indicates that the keys of a map needs to be sorted + // before serializing into JSON. + // WARNING: This hurts performance A LOT, USE WITH CARE. + SortMapKeys Options = 1 << bitSortMapKeys + + // EscapeHTML indicates encoder to escape all HTML characters + // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). + // WARNING: This hurts performance A LOT, USE WITH CARE. 
+ EscapeHTML Options = 1 << bitEscapeHTML + + // CompactMarshaler indicates that the output JSON from json.Marshaler + // is always compact and needs no validation + CompactMarshaler Options = 1 << bitCompactMarshaler + + // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler + // is always escaped string and needs no quoting + NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler + + // NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}', + // instead of 'null' + NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap + + // ValidateString indicates that encoder should validate the input string + // before encoding it into JSON. + ValidateString Options = 1 << bitValidateString + + // CompatibleWithStd is used to be compatible with std encoder. + CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler ) // Encoder represents a specific set of encoder configurations. type Encoder struct { - Opts Options - prefix string - indent string + Opts Options + prefix string + indent string } // Encode returns the JSON encoding of v. func (self *Encoder) Encode(v interface{}) ([]byte, error) { - if self.indent != "" || self.prefix != "" { - return EncodeIndented(v, self.prefix, self.indent, self.Opts) - } - return Encode(v, self.Opts) + if self.indent != "" || self.prefix != "" { + return EncodeIndented(v, self.prefix, self.indent, self.Opts) + } + return Encode(v, self.Opts) } // SortKeys enables the SortMapKeys option. func (self *Encoder) SortKeys() *Encoder { - self.Opts |= SortMapKeys - return self + self.Opts |= SortMapKeys + return self } // SetEscapeHTML specifies if option EscapeHTML opens func (self *Encoder) SetEscapeHTML(f bool) { - if f { - self.Opts |= EscapeHTML - } else { - self.Opts &= ^EscapeHTML - } + if f { + self.Opts |= EscapeHTML + } else { + self.Opts &= ^EscapeHTML + } } // SetValidateString specifies if option ValidateString opens func (self *Encoder) SetValidateString(f bool) { - if f { - self.Opts |= ValidateString - } else { - self.Opts &= ^ValidateString - } + if f { + self.Opts |= ValidateString + } else { + self.Opts &= ^ValidateString + } } // SetCompactMarshaler specifies if option CompactMarshaler opens func (self *Encoder) SetCompactMarshaler(f bool) { - if f { - self.Opts |= CompactMarshaler - } else { - self.Opts &= ^CompactMarshaler - } + if f { + self.Opts |= CompactMarshaler + } else { + self.Opts &= ^CompactMarshaler + } } // SetNoQuoteTextMarshaler specifies if option NoQuoteTextMarshaler opens func (self *Encoder) SetNoQuoteTextMarshaler(f bool) { - if f { - self.Opts |= NoQuoteTextMarshaler - } else { - self.Opts &= ^NoQuoteTextMarshaler - } + if f { + self.Opts |= NoQuoteTextMarshaler + } else { + self.Opts &= ^NoQuoteTextMarshaler + } } // SetIndent instructs the encoder to format each subsequent encoded // value as if indented by the package-level function EncodeIndent(). // Calling SetIndent("", "") disables indentation. func (enc *Encoder) SetIndent(prefix, indent string) { - enc.prefix = prefix - enc.indent = indent + enc.prefix = prefix + enc.indent = indent } // Quote returns the JSON-quoted version of s. 
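The Options flags above are single bits in a uint64, composed with | and cleared with &^; the Set* methods on Encoder do exactly that, and CompatibleWithStd bundles SortMapKeys, EscapeHTML, and CompactMarshaler. A standalone mock of the pattern (sonic's internal/encoder package is not directly importable, so the flags are redeclared here); func Quote continues below:

package main

import "fmt"

type Options uint64

const (
	bitSortMapKeys = iota
	bitEscapeHTML
	bitCompactMarshaler
)

const (
	SortMapKeys      Options = 1 << bitSortMapKeys
	EscapeHTML       Options = 1 << bitEscapeHTML
	CompactMarshaler Options = 1 << bitCompactMarshaler
	// CompatibleWithStd bundles the std-compatible behavior, as in the hunk.
	CompatibleWithStd = SortMapKeys | EscapeHTML | CompactMarshaler
)

func main() {
	opts := CompatibleWithStd
	opts &= ^EscapeHTML // SetEscapeHTML(false) clears the bit the same way
	fmt.Println(opts&SortMapKeys != 0, opts&EscapeHTML != 0) // true false
}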
func Quote(s string) string { - var n int - var p []byte + var n int + var p []byte - /* check for empty string */ - if s == "" { - return `""` - } + /* check for empty string */ + if s == "" { + return `""` + } - /* allocate space for result */ - n = len(s) + 2 - p = make([]byte, 0, n) + /* allocate space for result */ + n = len(s) + 2 + p = make([]byte, 0, n) - /* call the encoder */ - _ = encodeString(&p, s) - return rt.Mem2Str(p) + /* call the encoder */ + _ = encodeString(&p, s) + return rt.Mem2Str(p) } // Encode returns the JSON encoding of val, encoded with opts. func Encode(val interface{}, opts Options) ([]byte, error) { - var ret []byte - - buf := newBytes() - err := encodeInto(&buf, val, opts) - - /* check for errors */ - if err != nil { - freeBytes(buf) - return nil, err - } - - /* htmlescape or correct UTF-8 if opts enable */ - old := buf - buf = encodeFinish(old, opts) - pbuf := ((*rt.GoSlice)(unsafe.Pointer(&buf))).Ptr - pold := ((*rt.GoSlice)(unsafe.Pointer(&old))).Ptr - - /* return when allocated a new buffer */ - if pbuf != pold { - freeBytes(old) - return buf, nil - } - - /* make a copy of the result */ - ret = make([]byte, len(buf)) - copy(ret, buf) - - freeBytes(buf) - /* return the buffer into pool */ - return ret, nil + var ret []byte + + buf := newBytes() + err := encodeInto(&buf, val, opts) + + /* check for errors */ + if err != nil { + freeBytes(buf) + return nil, err + } + + /* htmlescape or correct UTF-8 if opts enable */ + old := buf + buf = encodeFinish(old, opts) + pbuf := ((*rt.GoSlice)(unsafe.Pointer(&buf))).Ptr + pold := ((*rt.GoSlice)(unsafe.Pointer(&old))).Ptr + + /* return when allocated a new buffer */ + if pbuf != pold { + freeBytes(old) + return buf, nil + } + + /* make a copy of the result */ + ret = make([]byte, len(buf)) + copy(ret, buf) + + freeBytes(buf) + /* return the buffer into pool */ + return ret, nil } // EncodeInto is like Encode but uses a user-supplied buffer instead of allocating // a new one. 
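Encode above follows a strict buffer discipline: serialize into a pooled buffer, copy the bytes out, then return the buffer to the pool so callers never hold pool-owned memory. A minimal sketch of the same shape, with sync.Pool standing in for sonic's internal newBytes/freeBytes helpers:

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 1024) }}

// encodeWithPool serializes via fill, copies the result out, and recycles the
// scratch buffer, mirroring the copy-then-free flow in Encode above.
func encodeWithPool(fill func([]byte) []byte) []byte {
	buf := fill(bufPool.Get().([]byte)[:0])
	ret := make([]byte, len(buf)) // copy out before the buffer is recycled
	copy(ret, buf)
	bufPool.Put(buf)
	return ret
}

func main() {
	out := encodeWithPool(func(b []byte) []byte { return append(b, `{"a":1}`...) })
	fmt.Println(string(out))
}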
func EncodeInto(buf *[]byte, val interface{}, opts Options) error { - err := encodeInto(buf, val, opts) - if err != nil { - return err - } - *buf = encodeFinish(*buf, opts) - return err + err := encodeInto(buf, val, opts) + if err != nil { + return err + } + *buf = encodeFinish(*buf, opts) + return err } func encodeInto(buf *[]byte, val interface{}, opts Options) error { - stk := newStack() - efv := rt.UnpackEface(val) - err := encodeTypedPointer(buf, efv.Type, &efv.Value, stk, uint64(opts)) - - /* return the stack into pool */ - if err != nil { - resetStack(stk) - } - freeStack(stk) - - /* avoid GC ahead */ - runtime.KeepAlive(buf) - runtime.KeepAlive(efv) - return err + stk := newStack() + efv := rt.UnpackEface(val) + err := encodeTypedPointer(buf, efv.Type, &efv.Value, stk, uint64(opts)) + + /* return the stack into pool */ + if err != nil { + resetStack(stk) + } + freeStack(stk) + + /* avoid GC ahead */ + runtime.KeepAlive(buf) + runtime.KeepAlive(efv) + return err } func encodeFinish(buf []byte, opts Options) []byte { - if opts & EscapeHTML != 0 { - buf = HTMLEscape(nil, buf) - } - if opts & ValidateString != 0 && !utf8.Validate(buf) { - buf = utf8.CorrectWith(nil, buf, `\ufffd`) - } - return buf + if opts&EscapeHTML != 0 { + buf = HTMLEscape(nil, buf) + } + if opts&ValidateString != 0 && !utf8.Validate(buf) { + buf = utf8.CorrectWith(nil, buf, `\ufffd`) + } + return buf } var typeByte = rt.UnpackType(reflect.TypeOf(byte(0))) @@ -241,46 +241,46 @@ var typeByte = rt.UnpackType(reflect.TypeOf(byte(0))) // escaping within <script> tags, so an alternative JSON encoding must // be used. func HTMLEscape(dst []byte, src []byte) []byte { - return htmlEscape(dst, src) + return htmlEscape(dst, src) } // EncodeIndented is like Encode but applies Indent to format the output. // Each JSON element in the output will begin on a new line beginning with prefix // followed by one or more copies of indent according to the indentation nesting. 
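Note the runtime.KeepAlive calls at the end of encodeInto above: once a value is handed to JIT/native code as a raw pointer, the garbage collector no longer sees a reference to it, so the Go value must be kept live explicitly until the call returns. A small self-contained demonstration of that requirement:

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// consumeRaw models a native call that only receives a raw pointer, which the
// GC cannot trace back to the original Go allocation.
func consumeRaw(p unsafe.Pointer) {
	fmt.Printf("raw pointer: %p\n", p)
}

func main() {
	v := new(int)
	*v = 42
	consumeRaw(unsafe.Pointer(v))
	runtime.KeepAlive(v) // v must not be collected before consumeRaw returns
}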
func EncodeIndented(val interface{}, prefix string, indent string, opts Options) ([]byte, error) { - var err error - var out []byte - var buf *bytes.Buffer - - /* encode into the buffer */ - out = newBytes() - err = EncodeInto(&out, val, opts) - - /* check for errors */ - if err != nil { - freeBytes(out) - return nil, err - } - - /* indent the JSON */ - buf = newBuffer() - err = json.Indent(buf, out, prefix, indent) - - /* check for errors */ - if err != nil { - freeBytes(out) - freeBuffer(buf) - return nil, err - } - - /* copy to the result buffer */ - ret := make([]byte, buf.Len()) - copy(ret, buf.Bytes()) - - /* return the buffers into pool */ - freeBytes(out) - freeBuffer(buf) - return ret, nil + var err error + var out []byte + var buf *bytes.Buffer + + /* encode into the buffer */ + out = newBytes() + err = EncodeInto(&out, val, opts) + + /* check for errors */ + if err != nil { + freeBytes(out) + return nil, err + } + + /* indent the JSON */ + buf = newBuffer() + err = json.Indent(buf, out, prefix, indent) + + /* check for errors */ + if err != nil { + freeBytes(out) + freeBuffer(buf) + return nil, err + } + + /* copy to the result buffer */ + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + + /* return the buffers into pool */ + freeBytes(out) + freeBuffer(buf) + return ret, nil } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in @@ -289,12 +289,12 @@ func EncodeIndented(val interface{}, prefix string, indent string, opts Options) // Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is // a compile option to set the depth of recursive compile for the nested struct type. func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { - cfg := option.DefaultCompileOptions() - for _, opt := range opts { - opt(&cfg) - break - } - return pretouchRec(map[reflect.Type]uint8{vt: 0}, cfg) + cfg := option.DefaultCompileOptions() + for _, opt := range opts { + opt(&cfg) + break + } + return pretouchRec(map[reflect.Type]uint8{vt: 0}, cfg) } // Valid validates json and returns first non-blank character position, @@ -303,26 +303,26 @@ func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { // // Note: it does not check for the invalid UTF-8 characters. 
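A hedged illustration of the Valid function defined below (ExampleValid is hypothetical and assumes fmt is imported; the return values follow the doc comment above):

func ExampleValid() {
	fmt.Println(Valid([]byte(` {"a":1} `))) // true 1: start is the first non-blank position
	fmt.Println(Valid([]byte(`{"a":1} x`))) // false 8: trailing garbage at index 8
}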
func Valid(data []byte) (ok bool, start int) { - n := len(data) - if n == 0 { - return false, -1 - } - s := rt.Mem2Str(data) - p := 0 - m := types.NewStateMachine() - ret := native.ValidateOne(&s, &p, m) - types.FreeStateMachine(m) - - if ret < 0 { - return false, p-1 - } - - /* check for trailing spaces */ - for ;p < n; p++ { - if (types.SPACE_MASK & (1 << data[p])) == 0 { - return false, p - } - } - - return true, ret -} \ No newline at end of file + n := len(data) + if n == 0 { + return false, -1 + } + s := rt.Mem2Str(data) + p := 0 + m := types.NewStateMachine() + ret := native.ValidateOne(&s, &p, m) + types.FreeStateMachine(m) + + if ret < 0 { + return false, p - 1 + } + + /* check for trailing spaces */ + for ; p < n; p++ { + if (types.SPACE_MASK & (1 << data[p])) == 0 { + return false, p + } + } + + return true, ret +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/errors.go b/vendor/github.com/bytedance/sonic/internal/encoder/errors.go index ac6848a5b..96d1cc5db 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/errors.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/errors.go @@ -17,49 +17,49 @@ package encoder import ( - `encoding/json` - `fmt` - `reflect` - `strconv` - `unsafe` + "encoding/json" + "fmt" + "reflect" + "strconv" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) -var _ERR_too_deep = &json.UnsupportedValueError { - Str : "Value nesting too deep", - Value : reflect.ValueOf("..."), +var _ERR_too_deep = &json.UnsupportedValueError{ + Str: "Value nesting too deep", + Value: reflect.ValueOf("..."), } -var _ERR_nan_or_infinite = &json.UnsupportedValueError { - Str : "NaN or ±Infinite", - Value : reflect.ValueOf("NaN or ±Infinite"), +var _ERR_nan_or_infinite = &json.UnsupportedValueError{ + Str: "NaN or ±Infinite", + Value: reflect.ValueOf("NaN or ±Infinite"), } func error_type(vtype reflect.Type) error { - return &json.UnsupportedTypeError{Type: vtype} + return &json.UnsupportedTypeError{Type: vtype} } func error_number(number json.Number) error { - return &json.UnsupportedValueError { - Str : "invalid number literal: " + strconv.Quote(string(number)), - Value : reflect.ValueOf(number), - } + return &json.UnsupportedValueError{ + Str: "invalid number literal: " + strconv.Quote(string(number)), + Value: reflect.ValueOf(number), + } } func error_marshaler(ret []byte, pos int) error { - return fmt.Errorf("invalid Marshaler output json syntax at %d: %q", pos, ret) + return fmt.Errorf("invalid Marshaler output json syntax at %d: %q", pos, ret) } const ( - panicNilPointerOfNonEmptyString int = 1 + iota + panicNilPointerOfNonEmptyString int = 1 + iota ) func goPanic(code int, val unsafe.Pointer) { - switch(code){ - case panicNilPointerOfNonEmptyString: - panic(fmt.Sprintf("val: %#v has nil pointer while its length is not zero!", (*rt.GoString)(val))) - default: - panic("encoder error!") - } -} \ No newline at end of file + switch code { + case panicNilPointerOfNonEmptyString: + panic(fmt.Sprintf("val: %#v has nil pointer while its length is not zero!", (*rt.GoString)(val))) + default: + panic("encoder error!") + } +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/mapiter.go b/vendor/github.com/bytedance/sonic/internal/encoder/mapiter.go index 8a322b3af..48c372870 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/mapiter.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/mapiter.go @@ -27,173 +27,197 @@ import ( ) type _MapPair struct { - k string // 
when the map key is integer, k is pointed to m - v unsafe.Pointer - m [32]byte + k string // when the map key is integer, k is pointed to m + v unsafe.Pointer + m [32]byte } type _MapIterator struct { - it rt.GoMapIterator // must be the first field - kv rt.GoSlice // slice of _MapPair - ki int + it rt.GoMapIterator // must be the first field + kv rt.GoSlice // slice of _MapPair + ki int } var ( - iteratorPool = sync.Pool{} - iteratorPair = rt.UnpackType(reflect.TypeOf(_MapPair{})) + iteratorPool = sync.Pool{} + iteratorPair = rt.UnpackType(reflect.TypeOf(_MapPair{})) ) func init() { - if unsafe.Offsetof(_MapIterator{}.it) != 0 { - panic("_MapIterator.it is not the first field") - } + if unsafe.Offsetof(_MapIterator{}.it) != 0 { + panic("_MapIterator.it is not the first field") + } } - func newIterator() *_MapIterator { - if v := iteratorPool.Get(); v == nil { - return new(_MapIterator) - } else { - return resetIterator(v.(*_MapIterator)) - } + if v := iteratorPool.Get(); v == nil { + return new(_MapIterator) + } else { + return resetIterator(v.(*_MapIterator)) + } } func resetIterator(p *_MapIterator) *_MapIterator { - p.ki = 0 - p.it = rt.GoMapIterator{} - p.kv.Len = 0 - return p + p.ki = 0 + p.it = rt.GoMapIterator{} + p.kv.Len = 0 + return p } func (self *_MapIterator) at(i int) *_MapPair { - return (*_MapPair)(unsafe.Pointer(uintptr(self.kv.Ptr) + uintptr(i) * unsafe.Sizeof(_MapPair{}))) + return (*_MapPair)(unsafe.Pointer(uintptr(self.kv.Ptr) + uintptr(i)*unsafe.Sizeof(_MapPair{}))) } func (self *_MapIterator) add() (p *_MapPair) { - p = self.at(self.kv.Len) - self.kv.Len++ - return + p = self.at(self.kv.Len) + self.kv.Len++ + return } func (self *_MapIterator) data() (p []_MapPair) { - *(*rt.GoSlice)(unsafe.Pointer(&p)) = self.kv - return + *(*rt.GoSlice)(unsafe.Pointer(&p)) = self.kv + return } func (self *_MapIterator) append(t *rt.GoType, k unsafe.Pointer, v unsafe.Pointer) (err error) { - p := self.add() - p.v = v + p := self.add() + p.v = v - /* check for strings */ - if tk := t.Kind(); tk != reflect.String { - return self.appendGeneric(p, t, tk, k) - } + /* check for strings */ + if tk := t.Kind(); tk != reflect.String { + return self.appendGeneric(p, t, tk, k) + } - /* fast path for strings */ - p.k = *(*string)(k) - return nil + /* fast path for strings */ + p.k = *(*string)(k) + return nil } func (self *_MapIterator) appendGeneric(p *_MapPair, t *rt.GoType, v reflect.Kind, k unsafe.Pointer) error { - switch v { - case reflect.Int : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int)(k)))]) ; return nil - case reflect.Int8 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int8)(k)))]) ; return nil - case reflect.Int16 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int16)(k)))]) ; return nil - case reflect.Int32 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int32)(k)))]) ; return nil - case reflect.Int64 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], *(*int64)(k))]) ; return nil - case reflect.Uint : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint)(k)))]) ; return nil - case reflect.Uint8 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint8)(k)))]) ; return nil - case reflect.Uint16 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint16)(k)))]) ; return nil - case reflect.Uint32 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint32)(k)))]) ; return nil - case reflect.Uint64 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], *(*uint64)(k))]) ; return nil - case reflect.Uintptr : p.k = 
rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uintptr)(k)))]) ; return nil - case reflect.Interface : return self.appendInterface(p, t, k) - case reflect.Struct, reflect.Ptr : return self.appendConcrete(p, t, k) - default : panic("unexpected map key type") - } + switch v { + case reflect.Int: + p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int)(k)))]) + return nil + case reflect.Int8: + p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int8)(k)))]) + return nil + case reflect.Int16: + p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int16)(k)))]) + return nil + case reflect.Int32: + p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int32)(k)))]) + return nil + case reflect.Int64: + p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], *(*int64)(k))]) + return nil + case reflect.Uint: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint)(k)))]) + return nil + case reflect.Uint8: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint8)(k)))]) + return nil + case reflect.Uint16: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint16)(k)))]) + return nil + case reflect.Uint32: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint32)(k)))]) + return nil + case reflect.Uint64: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], *(*uint64)(k))]) + return nil + case reflect.Uintptr: + p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uintptr)(k)))]) + return nil + case reflect.Interface: + return self.appendInterface(p, t, k) + case reflect.Struct, reflect.Ptr: + return self.appendConcrete(p, t, k) + default: + panic("unexpected map key type") + } } func (self *_MapIterator) appendConcrete(p *_MapPair, t *rt.GoType, k unsafe.Pointer) (err error) { - // compiler has already checked that the type implements the encoding.MarshalText interface - if !t.Indirect() { - k = *(*unsafe.Pointer)(k) - } - eface := rt.GoEface{Value: k, Type: t}.Pack() - out, err := eface.(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - p.k = rt.Mem2Str(out) - return + // compiler has already checked that the type implements the encoding.MarshalText interface + if !t.Indirect() { + k = *(*unsafe.Pointer)(k) + } + eface := rt.GoEface{Value: k, Type: t}.Pack() + out, err := eface.(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + p.k = rt.Mem2Str(out) + return } func (self *_MapIterator) appendInterface(p *_MapPair, t *rt.GoType, k unsafe.Pointer) (err error) { - if len(rt.IfaceType(t).Methods) == 0 { - panic("unexpected map key type") - } else if p.k, err = asText(k); err == nil { - return nil - } else { - return - } + if len(rt.IfaceType(t).Methods) == 0 { + panic("unexpected map key type") + } else if p.k, err = asText(k); err == nil { + return nil + } else { + return + } } func iteratorStop(p *_MapIterator) { - iteratorPool.Put(p) + iteratorPool.Put(p) } func iteratorNext(p *_MapIterator) { - i := p.ki - t := &p.it - - /* check for unordered iteration */ - if i < 0 { - mapiternext(t) - return - } - - /* check for end of iteration */ - if p.ki >= p.kv.Len { - t.K = nil - t.V = nil - return - } - - /* update the key-value pair, and increase the pointer */ - t.K = unsafe.Pointer(&p.at(p.ki).k) - t.V = p.at(p.ki).v - p.ki++ + i := p.ki + t := &p.it + + /* check for unordered iteration */ + if i < 0 { + mapiternext(t) + return + } + + /* check for end of iteration */ + if p.ki >= p.kv.Len { + t.K = nil + t.V = nil + return + } + + /* update the key-value pair, and increase the pointer */ + t.K = unsafe.Pointer(&p.at(p.ki).k) + t.V = 
p.at(p.ki).v + p.ki++ } func iteratorStart(t *rt.GoMapType, m *rt.GoMap, fv uint64) (*_MapIterator, error) { - it := newIterator() - mapiterinit(t, m, &it.it) - - /* check for key-sorting, empty map don't need sorting */ - if m.Count == 0 || (fv & uint64(SortMapKeys)) == 0 { - it.ki = -1 - return it, nil - } - - /* pre-allocate space if needed */ - if m.Count > it.kv.Cap { - it.kv = growslice(iteratorPair, it.kv, m.Count) - } - - /* dump all the key-value pairs */ - for ; it.it.K != nil; mapiternext(&it.it) { - if err := it.append(t.Key, it.it.K, it.it.V); err != nil { - iteratorStop(it) - return nil, err - } - } - - /* sort the keys, map with only 1 item don't need sorting */ - if it.ki = 1; m.Count > 1 { - radixQsort(it.data(), 0, maxDepth(it.kv.Len)) - } - - /* load the first pair into iterator */ - it.it.V = it.at(0).v - it.it.K = unsafe.Pointer(&it.at(0).k) - return it, nil + it := newIterator() + mapiterinit(t, m, &it.it) + + /* check for key-sorting, empty map don't need sorting */ + if m.Count == 0 || (fv&uint64(SortMapKeys)) == 0 { + it.ki = -1 + return it, nil + } + + /* pre-allocate space if needed */ + if m.Count > it.kv.Cap { + it.kv = growslice(iteratorPair, it.kv, m.Count) + } + + /* dump all the key-value pairs */ + for ; it.it.K != nil; mapiternext(&it.it) { + if err := it.append(t.Key, it.it.K, it.it.V); err != nil { + iteratorStop(it) + return nil, err + } + } + + /* sort the keys, map with only 1 item don't need sorting */ + if it.ki = 1; m.Count > 1 { + radixQsort(it.data(), 0, maxDepth(it.kv.Len)) + } + + /* load the first pair into iterator */ + it.it.V = it.at(0).v + it.it.K = unsafe.Pointer(&it.at(0).k) + return it, nil } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/pools.go b/vendor/github.com/bytedance/sonic/internal/encoder/pools.go index 9892ba11b..84352705a 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/pools.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/pools.go @@ -17,177 +17,177 @@ package encoder import ( - `bytes` - `sync` - `unsafe` - `errors` - `reflect` - - `github.com/bytedance/sonic/internal/caching` - `github.com/bytedance/sonic/option` - `github.com/bytedance/sonic/internal/rt` + "bytes" + "errors" + "reflect" + "sync" + "unsafe" + + "github.com/bytedance/sonic/internal/caching" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" ) const ( - _MaxStack = 4096 // 4k states + _MaxStack = 4096 // 4k states - _StackSize = unsafe.Sizeof(_Stack{}) + _StackSize = unsafe.Sizeof(_Stack{}) ) var ( - bytesPool = sync.Pool{} - stackPool = sync.Pool{} - bufferPool = sync.Pool{} - programCache = caching.CreateProgramCache() + bytesPool = sync.Pool{} + stackPool = sync.Pool{} + bufferPool = sync.Pool{} + programCache = caching.CreateProgramCache() ) type _State struct { - x int - f uint64 - p unsafe.Pointer - q unsafe.Pointer + x int + f uint64 + p unsafe.Pointer + q unsafe.Pointer } type _Stack struct { - sp uint64 - sb [_MaxStack]_State + sp uint64 + sb [_MaxStack]_State } type _Encoder func( - rb *[]byte, - vp unsafe.Pointer, - sb *_Stack, - fv uint64, + rb *[]byte, + vp unsafe.Pointer, + sb *_Stack, + fv uint64, ) error var _KeepAlive struct { - rb *[]byte - vp unsafe.Pointer - sb *_Stack - fv uint64 - err error - frame [_FP_offs]byte + rb *[]byte + vp unsafe.Pointer + sb *_Stack + fv uint64 + err error + frame [_FP_offs]byte } var errCallShadow = errors.New("DON'T CALL THIS!") // Faker func of _Encoder, used to export its stackmap as _Encoder's func _Encoder_Shadow(rb *[]byte, vp 
unsafe.Pointer, sb *_Stack, fv uint64) (err error) { - // align to assembler_amd64.go: _FP_offs - var frame [_FP_offs]byte - - // must keep all args and frames noticeable to GC - _KeepAlive.rb = rb - _KeepAlive.vp = vp - _KeepAlive.sb = sb - _KeepAlive.fv = fv - _KeepAlive.err = err - _KeepAlive.frame = frame - - return errCallShadow + // align to assembler_amd64.go: _FP_offs + var frame [_FP_offs]byte + + // must keep all args and frames noticeable to GC + _KeepAlive.rb = rb + _KeepAlive.vp = vp + _KeepAlive.sb = sb + _KeepAlive.fv = fv + _KeepAlive.err = err + _KeepAlive.frame = frame + + return errCallShadow } func newBytes() []byte { - if ret := bytesPool.Get(); ret != nil { - return ret.([]byte) - } else { - return make([]byte, 0, option.DefaultEncoderBufferSize) - } + if ret := bytesPool.Get(); ret != nil { + return ret.([]byte) + } else { + return make([]byte, 0, option.DefaultEncoderBufferSize) + } } func newStack() *_Stack { - if ret := stackPool.Get(); ret == nil { - return new(_Stack) - } else { - return ret.(*_Stack) - } + if ret := stackPool.Get(); ret == nil { + return new(_Stack) + } else { + return ret.(*_Stack) + } } func resetStack(p *_Stack) { - memclrNoHeapPointers(unsafe.Pointer(p), _StackSize) + memclrNoHeapPointers(unsafe.Pointer(p), _StackSize) } func newBuffer() *bytes.Buffer { - if ret := bufferPool.Get(); ret != nil { - return ret.(*bytes.Buffer) - } else { - return bytes.NewBuffer(make([]byte, 0, option.DefaultEncoderBufferSize)) - } + if ret := bufferPool.Get(); ret != nil { + return ret.(*bytes.Buffer) + } else { + return bytes.NewBuffer(make([]byte, 0, option.DefaultEncoderBufferSize)) + } } func freeBytes(p []byte) { - p = p[:0] - bytesPool.Put(p) + p = p[:0] + bytesPool.Put(p) } func freeStack(p *_Stack) { - p.sp = 0 - stackPool.Put(p) + p.sp = 0 + stackPool.Put(p) } func freeBuffer(p *bytes.Buffer) { - p.Reset() - bufferPool.Put(p) + p.Reset() + bufferPool.Put(p) } func makeEncoder(vt *rt.GoType, ex ...interface{}) (interface{}, error) { - if pp, err := newCompiler().compile(vt.Pack(), ex[0].(bool)); err != nil { - return nil, err - } else { - as := newAssembler(pp) - as.name = vt.String() - return as.Load(), nil - } + if pp, err := newCompiler().compile(vt.Pack(), ex[0].(bool)); err != nil { + return nil, err + } else { + as := newAssembler(pp) + as.name = vt.String() + return as.Load(), nil + } } func findOrCompile(vt *rt.GoType, pv bool) (_Encoder, error) { - if val := programCache.Get(vt); val != nil { - return val.(_Encoder), nil - } else if ret, err := programCache.Compute(vt, makeEncoder, pv); err == nil { - return ret.(_Encoder), nil - } else { - return nil, err - } + if val := programCache.Get(vt); val != nil { + return val.(_Encoder), nil + } else if ret, err := programCache.Compute(vt, makeEncoder, pv); err == nil { + return ret.(_Encoder), nil + } else { + return nil, err + } } func pretouchType(_vt reflect.Type, opts option.CompileOptions, v uint8) (map[reflect.Type]uint8, error) { - /* compile function */ - compiler := newCompiler().apply(opts) - encoder := func(vt *rt.GoType, ex ...interface{}) (interface{}, error) { - if pp, err := compiler.compile(_vt, ex[0].(bool)); err != nil { - return nil, err - } else { - as := newAssembler(pp) - as.name = vt.String() - return as.Load(), nil - } - } - - /* find or compile */ - vt := rt.UnpackType(_vt) - if val := programCache.Get(vt); val != nil { - return nil, nil - } else if _, err := programCache.Compute(vt, encoder, v == 1); err == nil { - return compiler.rec, nil - } else { - return nil, err - } 
+ /* compile function */ + compiler := newCompiler().apply(opts) + encoder := func(vt *rt.GoType, ex ...interface{}) (interface{}, error) { + if pp, err := compiler.compile(_vt, ex[0].(bool)); err != nil { + return nil, err + } else { + as := newAssembler(pp) + as.name = vt.String() + return as.Load(), nil + } + } + + /* find or compile */ + vt := rt.UnpackType(_vt) + if val := programCache.Get(vt); val != nil { + return nil, nil + } else if _, err := programCache.Compute(vt, encoder, v == 1); err == nil { + return compiler.rec, nil + } else { + return nil, err + } } func pretouchRec(vtm map[reflect.Type]uint8, opts option.CompileOptions) error { - if opts.RecursiveDepth < 0 || len(vtm) == 0 { - return nil - } - next := make(map[reflect.Type]uint8) - for vt, v := range vtm { - sub, err := pretouchType(vt, opts, v) - if err != nil { - return err - } - for svt, v := range sub { - next[svt] = v - } - } - opts.RecursiveDepth -= 1 - return pretouchRec(next, opts) -} \ No newline at end of file + if opts.RecursiveDepth < 0 || len(vtm) == 0 { + return nil + } + next := make(map[reflect.Type]uint8) + for vt, v := range vtm { + sub, err := pretouchType(vt, opts, v) + if err != nil { + return err + } + for svt, v := range sub { + next[svt] = v + } + } + opts.RecursiveDepth -= 1 + return pretouchRec(next, opts) +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/primitives.go b/vendor/github.com/bytedance/sonic/internal/encoder/primitives.go index 78fb29ff6..cedd23c00 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/primitives.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/primitives.go @@ -17,152 +17,152 @@ package encoder import ( - `encoding` - `encoding/json` - `unsafe` + "encoding" + "encoding/json" + "unsafe" - `github.com/bytedance/sonic/internal/jit` - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/jit" + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/rt" ) /** Encoder Primitives **/ func encodeNil(rb *[]byte) error { - *rb = append(*rb, 'n', 'u', 'l', 'l') - return nil + *rb = append(*rb, 'n', 'u', 'l', 'l') + return nil } func encodeString(buf *[]byte, val string) error { - var sidx int - var pbuf *rt.GoSlice - var pstr *rt.GoString - - /* opening quote */ - *buf = append(*buf, '"') - pbuf = (*rt.GoSlice)(unsafe.Pointer(buf)) - pstr = (*rt.GoString)(unsafe.Pointer(&val)) - - /* encode with native library */ - for sidx < pstr.Len { - sn := pstr.Len - sidx - dn := pbuf.Cap - pbuf.Len - sp := padd(pstr.Ptr, sidx) - dp := padd(pbuf.Ptr, pbuf.Len) - nb := native.Quote(sp, sn, dp, &dn, 0) - - /* check for errors */ - if pbuf.Len += dn; nb >= 0 { - break - } - - /* not enough space, grow the slice and try again */ - sidx += ^nb - *pbuf = growslice(rt.UnpackType(byteType), *pbuf, pbuf.Cap * 2) - } - - /* closing quote */ - *buf = append(*buf, '"') - return nil + var sidx int + var pbuf *rt.GoSlice + var pstr *rt.GoString + + /* opening quote */ + *buf = append(*buf, '"') + pbuf = (*rt.GoSlice)(unsafe.Pointer(buf)) + pstr = (*rt.GoString)(unsafe.Pointer(&val)) + + /* encode with native library */ + for sidx < pstr.Len { + sn := pstr.Len - sidx + dn := pbuf.Cap - pbuf.Len + sp := padd(pstr.Ptr, sidx) + dp := padd(pbuf.Ptr, pbuf.Len) + nb := native.Quote(sp, sn, dp, &dn, 0) + + /* check for errors */ + if pbuf.Len += dn; nb >= 0 { + break + } + + /* not enough space, grow the slice and try again */ + sidx += ^nb + *pbuf = 
growslice(rt.UnpackType(byteType), *pbuf, pbuf.Cap*2) + } + + /* closing quote */ + *buf = append(*buf, '"') + return nil } func encodeTypedPointer(buf *[]byte, vt *rt.GoType, vp *unsafe.Pointer, sb *_Stack, fv uint64) error { - if vt == nil { - return encodeNil(buf) - } else if fn, err := findOrCompile(vt, (fv&(1<<bitPointerValue)) != 0); err != nil { - return err - } else if vt.Indirect() { - rt.MoreStack(_FP_size + native.MaxFrameSize) - rt.StopProf() - err := fn(buf, *vp, sb, fv) - rt.StartProf() - return err - } else { - rt.MoreStack(_FP_size + native.MaxFrameSize) - rt.StopProf() - err := fn(buf, unsafe.Pointer(vp), sb, fv) - rt.StartProf() - return err - } + if vt == nil { + return encodeNil(buf) + } else if fn, err := findOrCompile(vt, (fv&(1<<bitPointerValue)) != 0); err != nil { + return err + } else if vt.Indirect() { + rt.MoreStack(_FP_size + native.MaxFrameSize) + rt.StopProf() + err := fn(buf, *vp, sb, fv) + rt.StartProf() + return err + } else { + rt.MoreStack(_FP_size + native.MaxFrameSize) + rt.StopProf() + err := fn(buf, unsafe.Pointer(vp), sb, fv) + rt.StartProf() + return err + } } func encodeJsonMarshaler(buf *[]byte, val json.Marshaler, opt Options) error { - if ret, err := val.MarshalJSON(); err != nil { - return err - } else { - if opt & CompactMarshaler != 0 { - return compact(buf, ret) - } - if ok, s := Valid(ret); !ok { - return error_marshaler(ret, s) - } - *buf = append(*buf, ret...) - return nil - } + if ret, err := val.MarshalJSON(); err != nil { + return err + } else { + if opt&CompactMarshaler != 0 { + return compact(buf, ret) + } + if ok, s := Valid(ret); !ok { + return error_marshaler(ret, s) + } + *buf = append(*buf, ret...) + return nil + } } func encodeTextMarshaler(buf *[]byte, val encoding.TextMarshaler, opt Options) error { - if ret, err := val.MarshalText(); err != nil { - return err - } else { - if opt & NoQuoteTextMarshaler != 0 { - *buf = append(*buf, ret...) - return nil - } - return encodeString(buf, rt.Mem2Str(ret) ) - } + if ret, err := val.MarshalText(); err != nil { + return err + } else { + if opt&NoQuoteTextMarshaler != 0 { + *buf = append(*buf, ret...) + return nil + } + return encodeString(buf, rt.Mem2Str(ret)) + } } func htmlEscape(dst []byte, src []byte) []byte { - var sidx int - - dst = append(dst, src[:0]...) // avoid check nil dst - sbuf := (*rt.GoSlice)(unsafe.Pointer(&src)) - dbuf := (*rt.GoSlice)(unsafe.Pointer(&dst)) - - /* grow dst if it is shorter */ - if cap(dst) - len(dst) < len(src) + native.BufPaddingSize { - cap := len(src) * 3 / 2 + native.BufPaddingSize - *dbuf = growslice(typeByte, *dbuf, cap) - } - - for sidx < sbuf.Len { - sp := padd(sbuf.Ptr, sidx) - dp := padd(dbuf.Ptr, dbuf.Len) - - sn := sbuf.Len - sidx - dn := dbuf.Cap - dbuf.Len - nb := native.HTMLEscape(sp, sn, dp, &dn) - - /* check for errors */ - if dbuf.Len += dn; nb >= 0 { - break - } - - /* not enough space, grow the slice and try again */ - sidx += ^nb - *dbuf = growslice(typeByte, *dbuf, dbuf.Cap * 2) - } - return dst + var sidx int + + dst = append(dst, src[:0]...) 
// avoid check nil dst + sbuf := (*rt.GoSlice)(unsafe.Pointer(&src)) + dbuf := (*rt.GoSlice)(unsafe.Pointer(&dst)) + + /* grow dst if it is shorter */ + if cap(dst)-len(dst) < len(src)+native.BufPaddingSize { + cap := len(src)*3/2 + native.BufPaddingSize + *dbuf = growslice(typeByte, *dbuf, cap) + } + + for sidx < sbuf.Len { + sp := padd(sbuf.Ptr, sidx) + dp := padd(dbuf.Ptr, dbuf.Len) + + sn := sbuf.Len - sidx + dn := dbuf.Cap - dbuf.Len + nb := native.HTMLEscape(sp, sn, dp, &dn) + + /* check for errors */ + if dbuf.Len += dn; nb >= 0 { + break + } + + /* not enough space, grow the slice and try again */ + sidx += ^nb + *dbuf = growslice(typeByte, *dbuf, dbuf.Cap*2) + } + return dst } var ( - argPtrs = []bool { true, true, true, false } - localPtrs = []bool{} + argPtrs = []bool{true, true, true, false} + localPtrs = []bool{} ) var ( - _F_assertI2I = jit.Func(assertI2I) + _F_assertI2I = jit.Func(assertI2I) ) func asText(v unsafe.Pointer) (string, error) { - text := assertI2I(_T_encoding_TextMarshaler, *(*rt.GoIface)(v)) - r, e := (*(*encoding.TextMarshaler)(unsafe.Pointer(&text))).MarshalText() - return rt.Mem2Str(r), e + text := assertI2I(_T_encoding_TextMarshaler, *(*rt.GoIface)(v)) + r, e := (*(*encoding.TextMarshaler)(unsafe.Pointer(&text))).MarshalText() + return rt.Mem2Str(r), e } func asJson(v unsafe.Pointer) (string, error) { - text := assertI2I(_T_json_Marshaler, *(*rt.GoIface)(v)) - r, e := (*(*json.Marshaler)(unsafe.Pointer(&text))).MarshalJSON() - return rt.Mem2Str(r), e -} \ No newline at end of file + text := assertI2I(_T_json_Marshaler, *(*rt.GoIface)(v)) + r, e := (*(*json.Marshaler)(unsafe.Pointer(&text))).MarshalJSON() + return rt.Mem2Str(r), e +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/sort.go b/vendor/github.com/bytedance/sonic/internal/encoder/sort.go index b1a67598b..f51e33e25 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/sort.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/sort.go @@ -19,188 +19,188 @@ package encoder // Algorithm 3-way Radix Quicksort, d means the radix. // Reference: https://algs4.cs.princeton.edu/51radix/Quick3string.java.html func radixQsort(kvs []_MapPair, d, maxDepth int) { - for len(kvs) > 11 { - // To avoid the worst case of quickSort (time: O(n^2)), use introsort here. - // Reference: https://en.wikipedia.org/wiki/Introsort and - // https://github.com/golang/go/issues/467 - if maxDepth == 0 { - heapSort(kvs, 0, len(kvs)) - return - } - maxDepth-- - - p := pivot(kvs, d) - lt, i, gt := 0, 0, len(kvs) - for i < gt { - c := byteAt(kvs[i].k, d) - if c < p { - swap(kvs, lt, i) - i++ - lt++ - } else if c > p { - gt-- - swap(kvs, i, gt) - } else { - i++ - } - } - - // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)] - // Native implemention: - // radixQsort(kvs[:lt], d, maxDepth) - // if p > -1 { - // radixQsort(kvs[lt:gt], d+1, maxDepth) - // } - // radixQsort(kvs[gt:], d, maxDepth) - // Optimize as follows: make recursive calls only for the smaller parts. 
- // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/ - if p == -1 { - if lt > len(kvs) - gt { - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[:lt] - } else { - radixQsort(kvs[:lt], d, maxDepth) - kvs = kvs[gt:] - } - } else { - ml := maxThree(lt, gt-lt, len(kvs)-gt) - if ml == lt { - radixQsort(kvs[lt:gt], d+1, maxDepth) - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[:lt] - } else if ml == gt-lt { - radixQsort(kvs[:lt], d, maxDepth) - radixQsort(kvs[gt:], d, maxDepth) - kvs = kvs[lt:gt] - d += 1 - } else { - radixQsort(kvs[:lt], d, maxDepth) - radixQsort(kvs[lt:gt], d+1, maxDepth) - kvs = kvs[gt:] - } - } - } - insertRadixSort(kvs, d) + for len(kvs) > 11 { + // To avoid the worst case of quickSort (time: O(n^2)), use introsort here. + // Reference: https://en.wikipedia.org/wiki/Introsort and + // https://github.com/golang/go/issues/467 + if maxDepth == 0 { + heapSort(kvs, 0, len(kvs)) + return + } + maxDepth-- + + p := pivot(kvs, d) + lt, i, gt := 0, 0, len(kvs) + for i < gt { + c := byteAt(kvs[i].k, d) + if c < p { + swap(kvs, lt, i) + i++ + lt++ + } else if c > p { + gt-- + swap(kvs, i, gt) + } else { + i++ + } + } + + // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)] + // Native implementation: + // radixQsort(kvs[:lt], d, maxDepth) + // if p > -1 { + // radixQsort(kvs[lt:gt], d+1, maxDepth) + // } + // radixQsort(kvs[gt:], d, maxDepth) + // Optimize as follows: make recursive calls only for the smaller parts. + // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/ + if p == -1 { + if lt > len(kvs)-gt { + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[:lt] + } else { + radixQsort(kvs[:lt], d, maxDepth) + kvs = kvs[gt:] + } + } else { + ml := maxThree(lt, gt-lt, len(kvs)-gt) + if ml == lt { + radixQsort(kvs[lt:gt], d+1, maxDepth) + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[:lt] + } else if ml == gt-lt { + radixQsort(kvs[:lt], d, maxDepth) + radixQsort(kvs[gt:], d, maxDepth) + kvs = kvs[lt:gt] + d += 1 + } else { + radixQsort(kvs[:lt], d, maxDepth) + radixQsort(kvs[lt:gt], d+1, maxDepth) + kvs = kvs[gt:] + } + } + } + insertRadixSort(kvs, d) } func insertRadixSort(kvs []_MapPair, d int) { - for i := 1; i < len(kvs); i++ { - for j := i; j > 0 && lessFrom(kvs[j].k, kvs[j-1].k, d); j-- { - swap(kvs, j, j-1) - } - } + for i := 1; i < len(kvs); i++ { + for j := i; j > 0 && lessFrom(kvs[j].k, kvs[j-1].k, d); j-- { + swap(kvs, j, j-1) + } + } } func pivot(kvs []_MapPair, d int) int { - m := len(kvs) >> 1 - if len(kvs) > 40 { - // Tukey's ``Ninther,'' median of three mediankvs of three. - t := len(kvs) / 8 - return medianThree( - medianThree(byteAt(kvs[0].k, d), byteAt(kvs[t].k, d), byteAt(kvs[2*t].k, d)), - medianThree(byteAt(kvs[m].k, d), byteAt(kvs[m-t].k, d), byteAt(kvs[m+t].k, d)), - medianThree(byteAt(kvs[len(kvs)-1].k, d), - byteAt(kvs[len(kvs)-1-t].k, d), - byteAt(kvs[len(kvs)-1-2*t].k, d))) - } - return medianThree(byteAt(kvs[0].k, d), byteAt(kvs[m].k, d), byteAt(kvs[len(kvs)-1].k, d)) + m := len(kvs) >> 1 + if len(kvs) > 40 { + // Tukey's ``Ninther,'' median of three medians of three.
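// Hedged aside on the ninther (the values below are hypothetical): nine key
// bytes are sampled at evenly spread positions, each triple is reduced to
// its median, and the median of those medians becomes the pivot. For
// samples {5, 9, 1}, {7, 2, 8} and {4, 6, 3} the triple medians are 5, 7
// and 4, so medianThree(5, 7, 4) picks 5. This guards the 3-way partition
// against adversarial orderings at the cost of nine byteAt probes, which
// is why it is used only for len(kvs) > 40.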
+ t := len(kvs) / 8 + return medianThree( + medianThree(byteAt(kvs[0].k, d), byteAt(kvs[t].k, d), byteAt(kvs[2*t].k, d)), + medianThree(byteAt(kvs[m].k, d), byteAt(kvs[m-t].k, d), byteAt(kvs[m+t].k, d)), + medianThree(byteAt(kvs[len(kvs)-1].k, d), + byteAt(kvs[len(kvs)-1-t].k, d), + byteAt(kvs[len(kvs)-1-2*t].k, d))) + } + return medianThree(byteAt(kvs[0].k, d), byteAt(kvs[m].k, d), byteAt(kvs[len(kvs)-1].k, d)) } func medianThree(i, j, k int) int { - if i > j { - i, j = j, i - } // i < j - if k < i { - return i - } - if k > j { - return j - } - return k + if i > j { + i, j = j, i + } // i < j + if k < i { + return i + } + if k > j { + return j + } + return k } func maxThree(i, j, k int) int { - max := i - if max < j { - max = j - } - if max < k { - max = k - } - return max + max := i + if max < j { + max = j + } + if max < k { + max = k + } + return max } // maxDepth returns a threshold at which quicksort should switch // to heapsort. It returns 2*ceil(lg(n+1)). func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 } // siftDown implements the heap property on kvs[lo:hi]. // first is an offset into the array where the root of the heap lies. func siftDown(kvs []_MapPair, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && kvs[first+child].k < kvs[first+child+1].k { - child++ - } - if kvs[first+root].k >= kvs[first+child].k { - return - } - swap(kvs, first+root, first+child) - root = child - } + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && kvs[first+child].k < kvs[first+child+1].k { + child++ + } + if kvs[first+root].k >= kvs[first+child].k { + return + } + swap(kvs, first+root, first+child) + root = child + } } func heapSort(kvs []_MapPair, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with the greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(kvs, i, hi, first) - } - - // Pop elements, the largest first, into end of kvs. - for i := hi - 1; i >= 0; i-- { - swap(kvs, first, first+i) - siftDown(kvs, lo, i, first) - } + first := a + lo := 0 + hi := b - a + + // Build heap with the greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(kvs, i, hi, first) + } + + // Pop elements, the largest first, into end of kvs. + for i := hi - 1; i >= 0; i-- { + swap(kvs, first, first+i) + siftDown(kvs, lo, i, first) + } } // Note that after swap, _MapPair.k does NOT point into its own _MapPair.m when the map key is an integer func swap(kvs []_MapPair, a, b int) { - kvs[a].k, kvs[b].k = kvs[b].k, kvs[a].k - kvs[a].v, kvs[b].v = kvs[b].v, kvs[a].v + kvs[a].k, kvs[b].k = kvs[b].k, kvs[a].k + kvs[a].v, kvs[b].v = kvs[b].v, kvs[a].v } // lessFrom compares two strings starting from position d.
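// Hedged illustration of lessFrom below: the first d bytes of both keys
// are already known to be equal, so comparison starts at index d. For
// example, lessFrom("machine", "macro", 3) compares 'h' (0x68) with 'r'
// (0x72) and returns true, and lessFrom("mac", "macro", 3) finds no
// differing byte, so the shorter key sorts first.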
func lessFrom(a, b string, d int) bool { - l := len(a) - if l > len(b) { - l = len(b) - } - for i := d; i < l; i++ { - if a[i] == b[i] { - continue - } - return a[i] < b[i] - } - return len(a) < len(b) + l := len(a) + if l > len(b) { + l = len(b) + } + for i := d; i < l; i++ { + if a[i] == b[i] { + continue + } + return a[i] < b[i] + } + return len(a) < len(b) } func byteAt(b string, p int) int { - if p < len(b) { - return int(b[p]) - } - return -1 + if p < len(b) { + return int(b[p]) + } + return -1 } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/stream.go b/vendor/github.com/bytedance/sonic/internal/encoder/stream.go index 8f73a2bf0..5ecc00eb5 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/stream.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/stream.go @@ -17,68 +17,68 @@ package encoder import ( - `encoding/json` - `io` + "encoding/json" + "io" ) // StreamEncoder writes its encoded output to an io.Writer. type StreamEncoder struct { - w io.Writer - Encoder + w io.Writer + Encoder } // NewStreamEncoder adapts to the encoding/json.NewEncoder API. // // NewStreamEncoder returns a new encoder that writes to w. func NewStreamEncoder(w io.Writer) *StreamEncoder { - return &StreamEncoder{w: w} + return &StreamEncoder{w: w} } // Encode encodes interface{} as JSON and writes it to the io.Writer func (enc *StreamEncoder) Encode(val interface{}) (err error) { - out := newBytes() + out := newBytes() - /* encode into the buffer */ - err = EncodeInto(&out, val, enc.Opts) - if err != nil { - goto free_bytes - } + /* encode into the buffer */ + err = EncodeInto(&out, val, enc.Opts) + if err != nil { + goto free_bytes + } - if enc.indent != "" || enc.prefix != "" { - /* indent the JSON */ - buf := newBuffer() - err = json.Indent(buf, out, enc.prefix, enc.indent) - if err != nil { - freeBuffer(buf) - goto free_bytes - } + if enc.indent != "" || enc.prefix != "" { + /* indent the JSON */ + buf := newBuffer() + err = json.Indent(buf, out, enc.prefix, enc.indent) + if err != nil { + freeBuffer(buf) + goto free_bytes + } - // according to standard library, terminate each value with a newline... - buf.WriteByte('\n') + // according to standard library, terminate each value with a newline... + buf.WriteByte('\n') - /* copy into io.Writer */ - _, err = io.Copy(enc.w, buf) - if err != nil { - freeBuffer(buf) - goto free_bytes - } + /* copy into io.Writer */ + _, err = io.Copy(enc.w, buf) + if err != nil { + freeBuffer(buf) + goto free_bytes + } - } else { - /* copy into io.Writer */ - var n int - for len(out) > 0 { - n, err = enc.w.Write(out) - out = out[n:] - if err != nil { - goto free_bytes - } - } + } else { + /* copy into io.Writer */ + var n int + for len(out) > 0 { + n, err = enc.w.Write(out) + out = out[n:] + if err != nil { + goto free_bytes + } + } - // according to standard library, terminate each value with a newline... - enc.w.Write([]byte{'\n'}) - } + // according to standard library, terminate each value with a newline...
+ enc.w.Write([]byte{'\n'}) + } free_bytes: - freeBytes(out) - return err -} \ No newline at end of file + freeBytes(out) + return err +} diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go116.go b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go116.go index 40d06f4af..d4320aef3 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go116.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go116.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.17 // +build go1.15,!go1.17 /* @@ -19,11 +20,11 @@ package encoder import ( - `unsafe` + "unsafe" - _ `github.com/chenzhuoyu/base64x` + _ "github.com/chenzhuoyu/base64x" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode @@ -62,4 +63,4 @@ func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) var _runtime_writeBarrier uintptr = rt.GcwbAddr() //go:linkname gcWriteBarrierAX runtime.gcWriteBarrier -func gcWriteBarrierAX() \ No newline at end of file +func gcWriteBarrierAX() diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go117.go b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go117.go index 6c8c6ec75..f99f81839 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go117.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go117.go @@ -1,3 +1,4 @@ +//go:build go1.17 && !go1.20 // +build go1.17,!go1.20 /* @@ -19,11 +20,11 @@ package encoder import ( - `unsafe` + "unsafe" - _ `github.com/chenzhuoyu/base64x` + _ "github.com/chenzhuoyu/base64x" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode @@ -63,4 +64,4 @@ func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) var _runtime_writeBarrier uintptr //go:linkname gcWriteBarrierAX runtime.gcWriteBarrier -func gcWriteBarrierAX() \ No newline at end of file +func gcWriteBarrierAX() diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go120.go b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go120.go index f1a7d10c7..91d8e9d5e 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go120.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/stubs_go120.go @@ -1,3 +1,4 @@ +//go:build go1.20 // +build go1.20 /* @@ -19,11 +20,11 @@ package encoder import ( - `unsafe` + "unsafe" - _ `github.com/chenzhuoyu/base64x` + _ "github.com/chenzhuoyu/base64x" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode @@ -63,4 +64,4 @@ func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) var _runtime_writeBarrier uintptr //go:linkname gcWriteBarrierAX runtime.gcWriteBarrier -func gcWriteBarrierAX() \ No newline at end of file +func gcWriteBarrierAX() diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/types.go b/vendor/github.com/bytedance/sonic/internal/encoder/types.go index 3d4a00668..3e5352f65 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/types.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/types.go @@ -17,31 +17,31 @@ package encoder import ( - `encoding` - `encoding/json` - `reflect` + "encoding" + "encoding/json" + "reflect" ) var ( - byteType = reflect.TypeOf(byte(0)) - jsonNumberType = reflect.TypeOf(json.Number("")) - jsonUnsupportedValueType = 
reflect.TypeOf(new(json.UnsupportedValueError)) + byteType = reflect.TypeOf(byte(0)) + jsonNumberType = reflect.TypeOf(json.Number("")) + jsonUnsupportedValueType = reflect.TypeOf(new(json.UnsupportedValueError)) ) var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - jsonMarshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() - encodingTextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() + jsonMarshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + encodingTextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() ) func isSimpleByte(vt reflect.Type) bool { - if vt.Kind() != byteType.Kind() { - return false - } else { - return !isEitherMarshaler(vt) && !isEitherMarshaler(reflect.PtrTo(vt)) - } + if vt.Kind() != byteType.Kind() { + return false + } else { + return !isEitherMarshaler(vt) && !isEitherMarshaler(reflect.PtrTo(vt)) + } } func isEitherMarshaler(vt reflect.Type) bool { - return vt.Implements(jsonMarshalerType) || vt.Implements(encodingTextMarshalerType) + return vt.Implements(jsonMarshalerType) || vt.Implements(encodingTextMarshalerType) } diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/utils.go b/vendor/github.com/bytedance/sonic/internal/encoder/utils.go index 510596fda..4d8edf3fc 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/utils.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/utils.go @@ -17,36 +17,36 @@ package encoder import ( - `encoding/json` - `unsafe` + "encoding/json" + "unsafe" - `github.com/bytedance/sonic/loader` + "github.com/bytedance/sonic/loader" ) //go:nosplit func padd(p unsafe.Pointer, v int) unsafe.Pointer { - return unsafe.Pointer(uintptr(p) + uintptr(v)) + return unsafe.Pointer(uintptr(p) + uintptr(v)) } //go:nosplit func ptoenc(p loader.Function) _Encoder { - return *(*_Encoder)(unsafe.Pointer(&p)) + return *(*_Encoder)(unsafe.Pointer(&p)) } func compact(p *[]byte, v []byte) error { - buf := newBuffer() - err := json.Compact(buf, v) + buf := newBuffer() + err := json.Compact(buf, v) - /* check for errors */ - if err != nil { - return err - } + /* check for errors */ + if err != nil { + return err + } - /* add to result */ - v = buf.Bytes() - *p = append(*p, v...) + /* add to result */ + v = buf.Bytes() + *p = append(*p, v...) 
- /* return the buffer into pool */ - freeBuffer(buf) - return nil + /* return the buffer into pool */ + freeBuffer(buf) + return nil } diff --git a/vendor/github.com/bytedance/sonic/internal/jit/arch_amd64.go b/vendor/github.com/bytedance/sonic/internal/jit/arch_amd64.go index 7405052d6..0e39c0db4 100644 --- a/vendor/github.com/bytedance/sonic/internal/jit/arch_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/jit/arch_amd64.go @@ -17,51 +17,51 @@ package jit import ( - `github.com/twitchyliquid64/golang-asm/asm/arch` - `github.com/twitchyliquid64/golang-asm/obj` + "github.com/twitchyliquid64/golang-asm/asm/arch" + "github.com/twitchyliquid64/golang-asm/obj" ) var ( - _AC = arch.Set("amd64") + _AC = arch.Set("amd64") ) func As(op string) obj.As { - if ret, ok := _AC.Instructions[op]; ok { - return ret - } else { - panic("invalid instruction: " + op) - } + if ret, ok := _AC.Instructions[op]; ok { + return ret + } else { + panic("invalid instruction: " + op) + } } func Imm(imm int64) obj.Addr { - return obj.Addr { - Type : obj.TYPE_CONST, - Offset : imm, - } + return obj.Addr{ + Type: obj.TYPE_CONST, + Offset: imm, + } } func Reg(reg string) obj.Addr { - if ret, ok := _AC.Register[reg]; ok { - return obj.Addr{Reg: ret, Type: obj.TYPE_REG} - } else { - panic("invalid register name: " + reg) - } + if ret, ok := _AC.Register[reg]; ok { + return obj.Addr{Reg: ret, Type: obj.TYPE_REG} + } else { + panic("invalid register name: " + reg) + } } func Ptr(reg obj.Addr, offs int64) obj.Addr { - return obj.Addr { - Reg : reg.Reg, - Type : obj.TYPE_MEM, - Offset : offs, - } + return obj.Addr{ + Reg: reg.Reg, + Type: obj.TYPE_MEM, + Offset: offs, + } } func Sib(reg obj.Addr, idx obj.Addr, scale int16, offs int64) obj.Addr { - return obj.Addr { - Reg : reg.Reg, - Index : idx.Reg, - Scale : scale, - Type : obj.TYPE_MEM, - Offset : offs, - } + return obj.Addr{ + Reg: reg.Reg, + Index: idx.Reg, + Scale: scale, + Type: obj.TYPE_MEM, + Offset: offs, + } } diff --git a/vendor/github.com/bytedance/sonic/internal/jit/assembler_amd64.go b/vendor/github.com/bytedance/sonic/internal/jit/assembler_amd64.go index d7d1751e5..2f4383c88 100644 --- a/vendor/github.com/bytedance/sonic/internal/jit/assembler_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/jit/assembler_amd64.go @@ -17,265 +17,278 @@ package jit import ( - `encoding/binary` - `strconv` - `strings` - `sync` - - `github.com/bytedance/sonic/loader` - `github.com/bytedance/sonic/internal/rt` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/obj/x86` + "encoding/binary" + "strconv" + "strings" + "sync" + + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/loader" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/x86" ) const ( - _LB_jump_pc = "_jump_pc_" + _LB_jump_pc = "_jump_pc_" ) type BaseAssembler struct { - i int - f func() - c []byte - o sync.Once - pb *Backend - xrefs map[string][]*obj.Prog - labels map[string]*obj.Prog - pendings map[string][]*obj.Prog + i int + f func() + c []byte + o sync.Once + pb *Backend + xrefs map[string][]*obj.Prog + labels map[string]*obj.Prog + pendings map[string][]*obj.Prog } /** Instruction Encoders **/ -var _NOPS = [][16]byte { - {0x90}, // NOP - {0x66, 0x90}, // 66 NOP - {0x0f, 0x1f, 0x00}, // NOP DWORD ptr [EAX] - {0x0f, 0x1f, 0x40, 0x00}, // NOP DWORD ptr [EAX + 00H] - {0x0f, 0x1f, 0x44, 0x00, 0x00}, // NOP DWORD ptr [EAX + EAX*1 + 00H] - {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00}, // 66 NOP DWORD ptr [EAX + 
EAX*1 + 00H] - {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00}, // NOP DWORD ptr [EAX + 00000000H] - {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, // NOP DWORD ptr [EAX + EAX*1 + 00000000H] - {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, // 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] +var _NOPS = [][16]byte{ + {0x90}, // NOP + {0x66, 0x90}, // 66 NOP + {0x0f, 0x1f, 0x00}, // NOP DWORD ptr [EAX] + {0x0f, 0x1f, 0x40, 0x00}, // NOP DWORD ptr [EAX + 00H] + {0x0f, 0x1f, 0x44, 0x00, 0x00}, // NOP DWORD ptr [EAX + EAX*1 + 00H] + {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00}, // 66 NOP DWORD ptr [EAX + EAX*1 + 00H] + {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00}, // NOP DWORD ptr [EAX + 00000000H] + {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, // NOP DWORD ptr [EAX + EAX*1 + 00000000H] + {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, // 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] } func (self *BaseAssembler) NOP() *obj.Prog { - p := self.pb.New() - p.As = obj.ANOP - self.pb.Append(p) - return p + p := self.pb.New() + p.As = obj.ANOP + self.pb.Append(p) + return p } func (self *BaseAssembler) NOPn(n int) { - for i := len(_NOPS); i > 0 && n > 0; i-- { - for ; n >= i; n -= i { - self.Byte(_NOPS[i - 1][:i]...) - } - } + for i := len(_NOPS); i > 0 && n > 0; i-- { + for ; n >= i; n -= i { + self.Byte(_NOPS[i-1][:i]...) + } + } } func (self *BaseAssembler) StorePtr(ptr int64, to obj.Addr, tmp obj.Addr) { - if (to.Type != obj.TYPE_MEM) || (tmp.Type != obj.TYPE_REG) { - panic("must store imm to memory, tmp must be register") - } - if (ptr >> 32) != 0 { - self.Emit("MOVQ", Imm(ptr), tmp) - self.Emit("MOVQ", tmp, to) - } else { - self.Emit("MOVQ", Imm(ptr), to); - } + if (to.Type != obj.TYPE_MEM) || (tmp.Type != obj.TYPE_REG) { + panic("must store imm to memory, tmp must be register") + } + if (ptr >> 32) != 0 { + self.Emit("MOVQ", Imm(ptr), tmp) + self.Emit("MOVQ", tmp, to) + } else { + self.Emit("MOVQ", Imm(ptr), to) + } } func (self *BaseAssembler) Byte(v ...byte) { - for ; len(v) >= 8; v = v[8:] { self.From("QUAD", Imm(rt.Get64(v))) } - for ; len(v) >= 4; v = v[4:] { self.From("LONG", Imm(int64(rt.Get32(v)))) } - for ; len(v) >= 2; v = v[2:] { self.From("WORD", Imm(int64(rt.Get16(v)))) } - for ; len(v) >= 1; v = v[1:] { self.From("BYTE", Imm(int64(v[0]))) } + for ; len(v) >= 8; v = v[8:] { + self.From("QUAD", Imm(rt.Get64(v))) + } + for ; len(v) >= 4; v = v[4:] { + self.From("LONG", Imm(int64(rt.Get32(v)))) + } + for ; len(v) >= 2; v = v[2:] { + self.From("WORD", Imm(int64(rt.Get16(v)))) + } + for ; len(v) >= 1; v = v[1:] { + self.From("BYTE", Imm(int64(v[0]))) + } } func (self *BaseAssembler) Mark(pc int) { - self.i++ - self.Link(_LB_jump_pc + strconv.Itoa(pc)) + self.i++ + self.Link(_LB_jump_pc + strconv.Itoa(pc)) } func (self *BaseAssembler) Link(to string) { - var p *obj.Prog - var v []*obj.Prog - - /* placeholder substitution */ - if strings.Contains(to, "{n}") { - to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) - } - - /* check for duplications */ - if _, ok := self.labels[to]; ok { - panic("label " + to + " has already been linked") - } - - /* get the pending links */ - p = self.NOP() - v = self.pendings[to] - - /* patch all the pending jumps */ - for _, q := range v { - q.To.Val = p - } - - /* mark the label as resolved */ - self.labels[to] = p - delete(self.pendings, to) + var p *obj.Prog + var v []*obj.Prog + + /* placeholder substitution */ + if strings.Contains(to, "{n}") { + to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) + } + + /* check for duplications */ + if 
_, ok := self.labels[to]; ok { + panic("label " + to + " has already been linked") + } + + /* get the pending links */ + p = self.NOP() + v = self.pendings[to] + + /* patch all the pending jumps */ + for _, q := range v { + q.To.Val = p + } + + /* mark the label as resolved */ + self.labels[to] = p + delete(self.pendings, to) } func (self *BaseAssembler) Xref(pc int, d int64) { - self.Sref(_LB_jump_pc + strconv.Itoa(pc), d) + self.Sref(_LB_jump_pc+strconv.Itoa(pc), d) } func (self *BaseAssembler) Sref(to string, d int64) { - p := self.pb.New() - p.As = x86.ALONG - p.From = Imm(-d) - - /* placeholder substitution */ - if strings.Contains(to, "{n}") { - to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) - } - - /* record the patch point */ - self.pb.Append(p) - self.xrefs[to] = append(self.xrefs[to], p) + p := self.pb.New() + p.As = x86.ALONG + p.From = Imm(-d) + + /* placeholder substitution */ + if strings.Contains(to, "{n}") { + to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) + } + + /* record the patch point */ + self.pb.Append(p) + self.xrefs[to] = append(self.xrefs[to], p) } func (self *BaseAssembler) Xjmp(op string, to int) { - self.Sjmp(op, _LB_jump_pc + strconv.Itoa(to)) + self.Sjmp(op, _LB_jump_pc+strconv.Itoa(to)) } func (self *BaseAssembler) Sjmp(op string, to string) { - p := self.pb.New() - p.As = As(op) - - /* placeholder substitution */ - if strings.Contains(to, "{n}") { - to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) - } - - /* check for backward jumps */ - if v, ok := self.labels[to]; ok { - p.To.Val = v - } else { - self.pendings[to] = append(self.pendings[to], p) - } - - /* mark as a branch, and add to instruction buffer */ - p.To.Type = obj.TYPE_BRANCH - self.pb.Append(p) + p := self.pb.New() + p.As = As(op) + + /* placeholder substitution */ + if strings.Contains(to, "{n}") { + to = strings.ReplaceAll(to, "{n}", strconv.Itoa(self.i)) + } + + /* check for backward jumps */ + if v, ok := self.labels[to]; ok { + p.To.Val = v + } else { + self.pendings[to] = append(self.pendings[to], p) + } + + /* mark as a branch, and add to instruction buffer */ + p.To.Type = obj.TYPE_BRANCH + self.pb.Append(p) } func (self *BaseAssembler) Rjmp(op string, to obj.Addr) { - p := self.pb.New() - p.To = to - p.As = As(op) - self.pb.Append(p) + p := self.pb.New() + p.To = to + p.As = As(op) + self.pb.Append(p) } func (self *BaseAssembler) From(op string, val obj.Addr) { - p := self.pb.New() - p.As = As(op) - p.From = val - self.pb.Append(p) + p := self.pb.New() + p.As = As(op) + p.From = val + self.pb.Append(p) } func (self *BaseAssembler) Emit(op string, args ...obj.Addr) { - p := self.pb.New() - p.As = As(op) - self.assignOperands(p, args) - self.pb.Append(p) + p := self.pb.New() + p.As = As(op) + self.assignOperands(p, args) + self.pb.Append(p) } func (self *BaseAssembler) assignOperands(p *obj.Prog, args []obj.Addr) { - switch len(args) { - case 0 : - case 1 : p.To = args[0] - case 2 : p.To, p.From = args[1], args[0] - case 3 : p.To, p.From, p.RestArgs = args[2], args[0], args[1:2] - case 4 : p.To, p.From, p.RestArgs = args[2], args[3], args[:2] - default : panic("invalid operands") - } + switch len(args) { + case 0: + case 1: + p.To = args[0] + case 2: + p.To, p.From = args[1], args[0] + case 3: + p.To, p.From, p.RestArgs = args[2], args[0], args[1:2] + case 4: + p.To, p.From, p.RestArgs = args[2], args[3], args[:2] + default: + panic("invalid operands") + } } /** Assembler Helpers **/ func (self *BaseAssembler) Size() int { - self.build() - return len(self.c) + 
self.build() + return len(self.c) } func (self *BaseAssembler) Init(f func()) { - self.i = 0 - self.f = f - self.c = nil - self.o = sync.Once{} + self.i = 0 + self.f = f + self.c = nil + self.o = sync.Once{} } var jitLoader = loader.Loader{ - Name: "sonic.jit.", - File: "github.com/bytedance/sonic/jit.go", - Options: loader.Options{ - NoPreempt: true, - }, + Name: "sonic.jit.", + File: "github.com/bytedance/sonic/jit.go", + Options: loader.Options{ + NoPreempt: true, + }, } func (self *BaseAssembler) Load(name string, frameSize int, argSize int, argStackmap []bool, localStackmap []bool) loader.Function { - self.build() - return jitLoader.LoadOne(self.c, name, frameSize, argSize, argStackmap, localStackmap) + self.build() + return jitLoader.LoadOne(self.c, name, frameSize, argSize, argStackmap, localStackmap) } /** Assembler Stages **/ func (self *BaseAssembler) init() { - self.pb = newBackend("amd64") - self.xrefs = map[string][]*obj.Prog{} - self.labels = map[string]*obj.Prog{} - self.pendings = map[string][]*obj.Prog{} + self.pb = newBackend("amd64") + self.xrefs = map[string][]*obj.Prog{} + self.labels = map[string]*obj.Prog{} + self.pendings = map[string][]*obj.Prog{} } func (self *BaseAssembler) build() { - self.o.Do(func() { - self.init() - self.f() - self.validate() - self.assemble() - self.resolve() - self.release() - }) + self.o.Do(func() { + self.init() + self.f() + self.validate() + self.assemble() + self.resolve() + self.release() + }) } func (self *BaseAssembler) release() { - self.pb.Release() - self.pb = nil - self.xrefs = nil - self.labels = nil - self.pendings = nil + self.pb.Release() + self.pb = nil + self.xrefs = nil + self.labels = nil + self.pendings = nil } func (self *BaseAssembler) resolve() { - for s, v := range self.xrefs { - for _, prog := range v { - if prog.As != x86.ALONG { - panic("invalid RIP relative reference") - } else if p, ok := self.labels[s]; !ok { - panic("links are not fully resolved: " + s) - } else { - off := prog.From.Offset + p.Pc - prog.Pc - binary.LittleEndian.PutUint32(self.c[prog.Pc:], uint32(off)) - } - } - } + for s, v := range self.xrefs { + for _, prog := range v { + if prog.As != x86.ALONG { + panic("invalid RIP relative reference") + } else if p, ok := self.labels[s]; !ok { + panic("links are not fully resolved: " + s) + } else { + off := prog.From.Offset + p.Pc - prog.Pc + binary.LittleEndian.PutUint32(self.c[prog.Pc:], uint32(off)) + } + } + } } func (self *BaseAssembler) validate() { - for key := range self.pendings { - panic("links are not fully resolved: " + key) - } + for key := range self.pendings { + panic("links are not fully resolved: " + key) + } } func (self *BaseAssembler) assemble() { - self.c = self.pb.Assemble() + self.c = self.pb.Assemble() } diff --git a/vendor/github.com/bytedance/sonic/internal/jit/backend.go b/vendor/github.com/bytedance/sonic/internal/jit/backend.go index 75e180415..c9bfed0e5 100644 --- a/vendor/github.com/bytedance/sonic/internal/jit/backend.go +++ b/vendor/github.com/bytedance/sonic/internal/jit/backend.go @@ -17,25 +17,25 @@ package jit import ( - `fmt` - `sync` - _ `unsafe` + "fmt" + "sync" + _ "unsafe" - `github.com/twitchyliquid64/golang-asm/asm/arch` - `github.com/twitchyliquid64/golang-asm/obj` - `github.com/twitchyliquid64/golang-asm/objabi` + "github.com/twitchyliquid64/golang-asm/asm/arch" + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" ) type Backend struct { - Ctxt *obj.Link - Arch *arch.Arch - Head *obj.Prog - Tail *obj.Prog - Prog 
[]*obj.Prog + Ctxt *obj.Link + Arch *arch.Arch + Head *obj.Prog + Tail *obj.Prog + Prog []*obj.Prog } var ( - _progPool sync.Pool + _progPool sync.Pool ) //go:nosplit @@ -43,78 +43,78 @@ var ( func throw(_ string) func newProg() *obj.Prog { - if val := _progPool.Get(); val == nil { - return new(obj.Prog) - } else { - return remProg(val.(*obj.Prog)) - } + if val := _progPool.Get(); val == nil { + return new(obj.Prog) + } else { + return remProg(val.(*obj.Prog)) + } } func remProg(p *obj.Prog) *obj.Prog { - *p = obj.Prog{} - return p + *p = obj.Prog{} + return p } func newBackend(name string) (ret *Backend) { - ret = new(Backend) - ret.Arch = arch.Set(name) - ret.Ctxt = newLinkContext(ret.Arch.LinkArch) - ret.Arch.Init(ret.Ctxt) - return + ret = new(Backend) + ret.Arch = arch.Set(name) + ret.Ctxt = newLinkContext(ret.Arch.LinkArch) + ret.Arch.Init(ret.Ctxt) + return } func newLinkContext(arch *obj.LinkArch) (ret *obj.Link) { - ret = obj.Linknew(arch) - ret.Headtype = objabi.Hlinux - ret.DiagFunc = diagLinkContext - return + ret = obj.Linknew(arch) + ret.Headtype = objabi.Hlinux + ret.DiagFunc = diagLinkContext + return } func diagLinkContext(str string, args ...interface{}) { - throw(fmt.Sprintf(str, args...)) + throw(fmt.Sprintf(str, args...)) } func (self *Backend) New() (ret *obj.Prog) { - ret = newProg() - ret.Ctxt = self.Ctxt - self.Prog = append(self.Prog, ret) - return + ret = newProg() + ret.Ctxt = self.Ctxt + self.Prog = append(self.Prog, ret) + return } func (self *Backend) Append(p *obj.Prog) { - if self.Head == nil { - self.Head = p - self.Tail = p - } else { - self.Tail.Link = p - self.Tail = p - } + if self.Head == nil { + self.Head = p + self.Tail = p + } else { + self.Tail.Link = p + self.Tail = p + } } func (self *Backend) Release() { - self.Arch = nil - self.Ctxt = nil - - /* return all the progs into pool */ - for _, p := range self.Prog { - _progPool.Put(p) - } - - /* clear all the references */ - self.Head = nil - self.Tail = nil - self.Prog = nil + self.Arch = nil + self.Ctxt = nil + + /* return all the progs into pool */ + for _, p := range self.Prog { + _progPool.Put(p) + } + + /* clear all the references */ + self.Head = nil + self.Tail = nil + self.Prog = nil } func (self *Backend) Assemble() []byte { - var sym obj.LSym - var fnv obj.FuncInfo + var sym obj.LSym + var fnv obj.FuncInfo - /* construct the function */ - sym.Func = &fnv - fnv.Text = self.Head + /* construct the function */ + sym.Func = &fnv + fnv.Text = self.Head - /* call the assembler */ - self.Arch.Assemble(self.Ctxt, &sym, self.New) - return sym.P + /* call the assembler */ + self.Arch.Assemble(self.Ctxt, &sym, self.New) + return sym.P } diff --git a/vendor/github.com/bytedance/sonic/internal/jit/runtime.go b/vendor/github.com/bytedance/sonic/internal/jit/runtime.go index ec69d067a..bedbda0c8 100644 --- a/vendor/github.com/bytedance/sonic/internal/jit/runtime.go +++ b/vendor/github.com/bytedance/sonic/internal/jit/runtime.go @@ -17,11 +17,11 @@ package jit import ( - `reflect` - `unsafe` + "reflect" + "unsafe" - `github.com/bytedance/sonic/internal/rt` - `github.com/twitchyliquid64/golang-asm/obj` + "github.com/bytedance/sonic/internal/rt" + "github.com/twitchyliquid64/golang-asm/obj" ) //go:noescape @@ -30,25 +30,25 @@ import ( func getitab(inter *rt.GoType, typ *rt.GoType, canfail bool) *rt.GoItab func Func(f interface{}) obj.Addr { - if p := rt.UnpackEface(f); p.Type.Kind() != reflect.Func { - panic("f is not a function") - } else { - return Imm(*(*int64)(p.Value)) - } + if p := rt.UnpackEface(f); 
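Most of the churn in these vendor hunks is mechanical: sonic originally spelled its import paths as raw string literals, and the update reformats the tree with standard tooling. Both spellings are valid Go, since an import path is just a string literal, but gofmt normalizes the raw form to ordinary double quotes, which is what produces the wall of one-line import diffs here. A small illustration, assuming nothing beyond stock gofmt behavior:

```go
package jit

// Raw-string import paths, as the vendored sonic sources used to
// spell them; gofmt rewrites each one to the quoted form ("fmt",
// "sync"), which accounts for most of the hunks in this section.
import (
	`fmt`
	`sync`
)

var once sync.Once

func hello() { once.Do(func() { fmt.Println("hi") }) }
```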
p.Type.Kind() != reflect.Func { + panic("f is not a function") + } else { + return Imm(*(*int64)(p.Value)) + } } func Type(t reflect.Type) obj.Addr { - return Gtype(rt.UnpackType(t)) + return Gtype(rt.UnpackType(t)) } func Itab(i *rt.GoType, t reflect.Type) obj.Addr { - return Imm(int64(uintptr(unsafe.Pointer(getitab(i, rt.UnpackType(t), false))))) + return Imm(int64(uintptr(unsafe.Pointer(getitab(i, rt.UnpackType(t), false))))) } func Gitab(i *rt.GoItab) obj.Addr { - return Imm(int64(uintptr(unsafe.Pointer(i)))) + return Imm(int64(uintptr(unsafe.Pointer(i)))) } func Gtype(t *rt.GoType) obj.Addr { - return Imm(int64(uintptr(unsafe.Pointer(t)))) + return Imm(int64(uintptr(unsafe.Pointer(t)))) } diff --git a/vendor/github.com/bytedance/sonic/internal/loader/funcdata.go b/vendor/github.com/bytedance/sonic/internal/loader/funcdata.go index 59a3cb349..d011c8e2d 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/funcdata.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/funcdata.go @@ -17,11 +17,11 @@ package loader import ( - `reflect` - `sync` - `unsafe` + "reflect" + "sync" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) //go:linkname lastmoduledatap runtime.lastmoduledatap @@ -35,13 +35,13 @@ func moduledataverify1(_ *_ModuleData) // // See funcdata.h and $GROOT/src/cmd/internal/objabi/funcdata.go. const ( - _FUNCDATA_ArgsPointerMaps = 0 - _FUNCDATA_LocalsPointerMaps = 1 + _FUNCDATA_ArgsPointerMaps = 0 + _FUNCDATA_LocalsPointerMaps = 1 ) type funcInfo struct { - *_Func - datap *_ModuleData + *_Func + datap *_ModuleData } //go:linkname findfunc runtime.findfunc @@ -51,74 +51,74 @@ func findfunc(pc uintptr) funcInfo func funcdata(f funcInfo, i uint8) unsafe.Pointer var ( - modLock sync.Mutex - modList []*_ModuleData + modLock sync.Mutex + modList []*_ModuleData ) var emptyByte byte func encodeVariant(v int) []byte { - var u int - var r []byte - - /* split every 7 bits */ - for v > 127 { - u = v & 0x7f - v = v >> 7 - r = append(r, byte(u) | 0x80) - } - - /* check for last one */ - if v == 0 { - return r - } - - /* add the last one */ - r = append(r, byte(v)) - return r + var u int + var r []byte + + /* split every 7 bits */ + for v > 127 { + u = v & 0x7f + v = v >> 7 + r = append(r, byte(u)|0x80) + } + + /* check for last one */ + if v == 0 { + return r + } + + /* add the last one */ + r = append(r, byte(v)) + return r } func registerModule(mod *_ModuleData) { - modLock.Lock() - modList = append(modList, mod) - lastmoduledatap.next = mod - lastmoduledatap = mod - modLock.Unlock() + modLock.Lock() + modList = append(modList, mod) + lastmoduledatap.next = mod + lastmoduledatap = mod + modLock.Unlock() } func stackMap(f interface{}) (args uintptr, locals uintptr) { - fv := reflect.ValueOf(f) - if fv.Kind() != reflect.Func { - panic("f must be reflect.Func kind!") - } - fi := findfunc(fv.Pointer()) - return uintptr(funcdata(fi, uint8(_FUNCDATA_ArgsPointerMaps))), uintptr(funcdata(fi, uint8(_FUNCDATA_LocalsPointerMaps))) + fv := reflect.ValueOf(f) + if fv.Kind() != reflect.Func { + panic("f must be reflect.Func kind!") + } + fi := findfunc(fv.Pointer()) + return uintptr(funcdata(fi, uint8(_FUNCDATA_ArgsPointerMaps))), uintptr(funcdata(fi, uint8(_FUNCDATA_LocalsPointerMaps))) } -var moduleCache = struct{ - m map[*_ModuleData][]byte - l sync.Mutex +var moduleCache = struct { + m map[*_ModuleData][]byte + l sync.Mutex }{ - m : make(map[*_ModuleData][]byte), + m: make(map[*_ModuleData][]byte), } func cacheStackmap(argPtrs []bool, 
localPtrs []bool, mod *_ModuleData) (argptrs uintptr, localptrs uintptr) { - as := rt.StackMapBuilder{} - for _, b := range argPtrs { - as.AddField(b) - } - ab, _ := as.Build().MarshalBinary() - ls := rt.StackMapBuilder{} - for _, b := range localPtrs { - ls.AddField(b) - } - lb, _ := ls.Build().MarshalBinary() - cache := make([]byte, len(ab) + len(lb)) - copy(cache, ab) - copy(cache[len(ab):], lb) - moduleCache.l.Lock() - moduleCache.m[mod] = cache - moduleCache.l.Unlock() - return uintptr(rt.IndexByte(cache, 0)), uintptr(rt.IndexByte(cache, len(ab))) - -} \ No newline at end of file + as := rt.StackMapBuilder{} + for _, b := range argPtrs { + as.AddField(b) + } + ab, _ := as.Build().MarshalBinary() + ls := rt.StackMapBuilder{} + for _, b := range localPtrs { + ls.AddField(b) + } + lb, _ := ls.Build().MarshalBinary() + cache := make([]byte, len(ab)+len(lb)) + copy(cache, ab) + copy(cache[len(ab):], lb) + moduleCache.l.Lock() + moduleCache.m[mod] = cache + moduleCache.l.Unlock() + return uintptr(rt.IndexByte(cache, 0)), uintptr(rt.IndexByte(cache, len(ab))) + +} diff --git a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go115.go b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go115.go index b0d2d6c65..ba04dea57 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go115.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go115.go @@ -1,3 +1,4 @@ +//go:build go1.15 && !go1.16 // +build go1.15,!go1.16 /* @@ -19,151 +20,151 @@ package loader import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) type _Func struct { - entry uintptr // start pc - nameoff int32 // function name - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - pcsp int32 - pcfile int32 - pcln int32 - npcdata int32 - funcID uint8 // set for certain special runtime functions - _ [2]int8 // unused - nfuncdata uint8 // must be last - argptrs uintptr - localptrs uintptr + entry uintptr // start pc + nameoff int32 // function name + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
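encodeVariant above is a little-endian base-128 varint, the same shape the runtime uses inside pclntab: the value is cut into 7-bit groups, least significant first, and every byte except the last carries the 0x80 continuation bit. A self-contained round trip, with the decoder added purely for illustration (the loader only encodes; the runtime does the reading):

```go
package main

import "fmt"

// encodeVariant mirrors the loader's encoder: 7-bit groups, LSB
// first, 0x80 on every byte but the last. Corner case: an input of
// 0 encodes to an empty slice, which the loader never hits because
// its callers always pass positive values.
func encodeVariant(v int) []byte {
	var r []byte
	for v > 127 {
		r = append(r, byte(v&0x7f)|0x80)
		v >>= 7
	}
	if v == 0 {
		return r
	}
	return append(r, byte(v))
}

// decodeVariant inverts the encoding; illustrative only.
func decodeVariant(b []byte) (v, n int) {
	for shift := uint(0); n < len(b); shift += 7 {
		c := b[n]
		n++
		v |= int(c&0x7f) << shift
		if c&0x80 == 0 {
			break
		}
	}
	return
}

func main() {
	for _, x := range []int{1, 127, 128, 300, 1 << 20} {
		enc := encodeVariant(x)
		dec, _ := decodeVariant(enc)
		fmt.Printf("%7d -> % x -> %d\n", x, enc, dec)
	}
}
```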
+ pcsp int32 + pcfile int32 + pcln int32 + npcdata int32 + funcID uint8 // set for certain special runtime functions + _ [2]int8 // unused + nfuncdata uint8 // must be last + argptrs uintptr + localptrs uintptr } type _FuncTab struct { - entry uintptr - funcoff uintptr + entry uintptr + funcoff uintptr } type _BitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type _PtabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type _TextSection struct { - vaddr uintptr // prelinked section vaddr - length uintptr // section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + length uintptr // section length + baseaddr uintptr // relocated section address } type _ModuleData struct { - pclntable []byte - ftab []_FuncTab - filetab []uint32 - findfunctab *_FindFuncBucket - minpc, maxpc uintptr - text, etext uintptr - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - textsectmap []_TextSection - typelinks []int32 // offsets from types - itablinks []*rt.GoItab - ptab []_PtabEntry - pluginpath string - pkghashes []byte - modulename string - modulehashes []byte - hasmain uint8 // 1 if module contains the main function, 0 otherwise - gcdatamask, gcbssmask _BitVector - typemap map[int32]*rt.GoType // offset to *_rtype in previous module - bad bool // module failed to load and should be ignored - next *_ModuleData + pclntable []byte + ftab []_FuncTab + filetab []uint32 + findfunctab *_FindFuncBucket + minpc, maxpc uintptr + text, etext uintptr + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr + textsectmap []_TextSection + typelinks []int32 // offsets from types + itablinks []*rt.GoItab + ptab []_PtabEntry + pluginpath string + pkghashes []byte + modulename string + modulehashes []byte + hasmain uint8 // 1 if module contains the main function, 0 otherwise + gcdatamask, gcbssmask _BitVector + typemap map[int32]*rt.GoType // offset to *_rtype in previous module + bad bool // module failed to load and should be ignored + next *_ModuleData } type _FindFuncBucket struct { - idx uint32 - subbuckets [16]byte + idx uint32 + subbuckets [16]byte } -var findFuncTab = &_FindFuncBucket { - idx: 1, +var findFuncTab = &_FindFuncBucket{ + idx: 1, } func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argPtrs []bool, localPtrs []bool) { - mod := new(_ModuleData) - minpc := pc - maxpc := pc + size - - /* build the PC & line table */ - pclnt := []byte { - 0xfb, 0xff, 0xff, 0xff, // magic : 0xfffffffb - 0, // pad1 : 0 - 0, // pad2 : 0 - 1, // minLC : 1 - 4 << (^uintptr(0) >> 63), // ptrSize : 4 << (^uintptr(0) >> 63) - } - - // cache arg and local stackmap - argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) - - /* add the function name */ - noff := len(pclnt) - pclnt = append(append(pclnt, name...), 0) - - /* add PCDATA */ - pcsp := len(pclnt) - pclnt = append(pclnt, encodeVariant((fp + 1) << 1)...) - pclnt = append(pclnt, encodeVariant(int(size))...) 
- - /* function entry */ - fnv := _Func { - entry : pc, - nameoff : int32(noff), - args : int32(args), - pcsp : int32(pcsp), - nfuncdata : 2, - argptrs : uintptr(argptrs), - localptrs : uintptr(localptrs), - } - - /* align the func to 8 bytes */ - if p := len(pclnt) % 8; p != 0 { - pclnt = append(pclnt, make([]byte, 8 - p)...) - } - - /* add the function descriptor */ - foff := len(pclnt) - pclnt = append(pclnt, (*(*[unsafe.Sizeof(_Func{})]byte)(unsafe.Pointer(&fnv)))[:]...) - - /* function table */ - tab := []_FuncTab { - {entry: pc, funcoff: uintptr(foff)}, - {entry: pc, funcoff: uintptr(foff)}, - {entry: maxpc}, - } - - /* module data */ - *mod = _ModuleData { - pclntable : pclnt, - ftab : tab, - findfunctab : findFuncTab, - minpc : minpc, - maxpc : maxpc, - modulename : name, - gcdata: uintptr(unsafe.Pointer(&emptyByte)), - gcbss: uintptr(unsafe.Pointer(&emptyByte)), - } - - /* verify and register the new module */ - moduledataverify1(mod) - registerModule(mod) + mod := new(_ModuleData) + minpc := pc + maxpc := pc + size + + /* build the PC & line table */ + pclnt := []byte{ + 0xfb, 0xff, 0xff, 0xff, // magic : 0xfffffffb + 0, // pad1 : 0 + 0, // pad2 : 0 + 1, // minLC : 1 + 4 << (^uintptr(0) >> 63), // ptrSize : 4 << (^uintptr(0) >> 63) + } + + // cache arg and local stackmap + argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) + + /* add the function name */ + noff := len(pclnt) + pclnt = append(append(pclnt, name...), 0) + + /* add PCDATA */ + pcsp := len(pclnt) + pclnt = append(pclnt, encodeVariant((fp+1)<<1)...) + pclnt = append(pclnt, encodeVariant(int(size))...) + + /* function entry */ + fnv := _Func{ + entry: pc, + nameoff: int32(noff), + args: int32(args), + pcsp: int32(pcsp), + nfuncdata: 2, + argptrs: uintptr(argptrs), + localptrs: uintptr(localptrs), + } + + /* align the func to 8 bytes */ + if p := len(pclnt) % 8; p != 0 { + pclnt = append(pclnt, make([]byte, 8-p)...) + } + + /* add the function descriptor */ + foff := len(pclnt) + pclnt = append(pclnt, (*(*[unsafe.Sizeof(_Func{})]byte)(unsafe.Pointer(&fnv)))[:]...) + + /* function table */ + tab := []_FuncTab{ + {entry: pc, funcoff: uintptr(foff)}, + {entry: pc, funcoff: uintptr(foff)}, + {entry: maxpc}, + } + + /* module data */ + *mod = _ModuleData{ + pclntable: pclnt, + ftab: tab, + findfunctab: findFuncTab, + minpc: minpc, + maxpc: maxpc, + modulename: name, + gcdata: uintptr(unsafe.Pointer(&emptyByte)), + gcbss: uintptr(unsafe.Pointer(&emptyByte)), + } + + /* verify and register the new module */ + moduledataverify1(mod) + registerModule(mod) } diff --git a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go116.go b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go116.go index f01747f93..a98e0a185 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go116.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go116.go @@ -20,156 +20,156 @@ package loader import ( - `unsafe` + "unsafe" ) type _Func struct { - entry uintptr // start pc - nameoff int32 // function name - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
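One expression in registerFunction deserves a gloss: the pclntab header's ptrSize is computed as `4 << (^uintptr(0) >> 63)` so the loader learns the word size without importing anything. On a 64-bit target `^uintptr(0)` has its top bit set, the shift yields 1, and the result is 8; on a 32-bit target the over-wide shift is defined in Go to yield 0 (not undefined behavior), leaving 4. A two-line check:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// 1 on 64-bit targets (top bit of ^uintptr(0) set); 0 on 32-bit
	// ones, where shifting a 32-bit value by 63 is defined to be 0.
	ptrSize := 4 << (^uintptr(0) >> 63)
	fmt.Println(ptrSize == int(unsafe.Sizeof(uintptr(0)))) // true
}
```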
- pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - _ [2]byte // pad - nfuncdata uint8 // must be last - argptrs uintptr - localptrs uintptr + entry uintptr // start pc + nameoff int32 // function name + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + _ [2]byte // pad + nfuncdata uint8 // must be last + argptrs uintptr + localptrs uintptr } type _FuncTab struct { - entry uintptr - funcoff uintptr + entry uintptr + funcoff uintptr } type _PCHeader struct { - magic uint32 // 0xFFFFFFFA - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab. - funcnameOffset uintptr // offset to the funcnametab variable from _PCHeader - cuOffset uintptr // offset to the cutab variable from _PCHeader - filetabOffset uintptr // offset to the filetab variable from _PCHeader - pctabOffset uintptr // offset to the pctab varible from _PCHeader - pclnOffset uintptr // offset to the pclntab variable from _PCHeader + magic uint32 // 0xFFFFFFFA + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab. + funcnameOffset uintptr // offset to the funcnametab variable from _PCHeader + cuOffset uintptr // offset to the cutab variable from _PCHeader + filetabOffset uintptr // offset to the filetab variable from _PCHeader + pctabOffset uintptr // offset to the pctab varible from _PCHeader + pclnOffset uintptr // offset to the pclntab variable from _PCHeader } type _BitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type _PtabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type _TextSection struct { - vaddr uintptr // prelinked section vaddr - length uintptr // section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + length uintptr // section length + baseaddr uintptr // relocated section address } type _ModuleData struct { - pcHeader *_PCHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []_Func - ftab []_FuncTab - findfunctab *_FindFuncBucket - minpc, maxpc uintptr - text, etext uintptr - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - textsectmap []_TextSection - typelinks []int32 - itablinks []unsafe.Pointer - ptab []_PtabEntry - pluginpath string - pkghashes []struct{} - modulename string - modulehashes []struct{} - hasmain uint8 - gcdatamask, gcbssmask _BitVector - typemap map[int32]unsafe.Pointer - bad bool - next *_ModuleData + pcHeader *_PCHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []_Func + ftab []_FuncTab + findfunctab *_FindFuncBucket + minpc, maxpc uintptr + text, etext uintptr + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, 
etypes uintptr + textsectmap []_TextSection + typelinks []int32 + itablinks []unsafe.Pointer + ptab []_PtabEntry + pluginpath string + pkghashes []struct{} + modulename string + modulehashes []struct{} + hasmain uint8 + gcdatamask, gcbssmask _BitVector + typemap map[int32]unsafe.Pointer + bad bool + next *_ModuleData } type _FindFuncBucket struct { - idx uint32 - subbuckets [16]byte + idx uint32 + subbuckets [16]byte } -var modHeader = &_PCHeader { - magic : 0xfffffffa, - minLC : 1, - nfunc : 1, - ptrSize : 4 << (^uintptr(0) >> 63), +var modHeader = &_PCHeader{ + magic: 0xfffffffa, + minLC: 1, + nfunc: 1, + ptrSize: 4 << (^uintptr(0) >> 63), } -var findFuncTab = &_FindFuncBucket { - idx: 1, +var findFuncTab = &_FindFuncBucket{ + idx: 1, } func makePCtab(fp int) []byte { - return append([]byte{0}, encodeVariant((fp + 1) << 1)...) + return append([]byte{0}, encodeVariant((fp+1)<<1)...) } func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argPtrs []bool, localPtrs []bool) { - mod := new(_ModuleData) - - minpc := pc - maxpc := pc + size - - // cache arg and local stackmap - argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) - - /* function entry */ - lnt := []_Func {{ - entry : pc, - nameoff : 1, - args : int32(args), - pcsp : 1, - nfuncdata : 2, - argptrs : uintptr(argptrs), - localptrs : uintptr(localptrs), - }} - - /* function table */ - tab := []_FuncTab { - {entry: pc}, - {entry: pc}, - {entry: maxpc}, - } - - /* module data */ - *mod = _ModuleData { - pcHeader : modHeader, - funcnametab : append(append([]byte{0}, name...), 0), - pctab : append(makePCtab(fp), encodeVariant(int(size))...), - pclntable : lnt, - ftab : tab, - findfunctab : findFuncTab, - minpc : minpc, - maxpc : maxpc, - modulename : name, - gcdata: uintptr(unsafe.Pointer(&emptyByte)), - gcbss: uintptr(unsafe.Pointer(&emptyByte)), - } - - /* verify and register the new module */ - moduledataverify1(mod) - registerModule(mod) -} \ No newline at end of file + mod := new(_ModuleData) + + minpc := pc + maxpc := pc + size + + // cache arg and local stackmap + argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) + + /* function entry */ + lnt := []_Func{{ + entry: pc, + nameoff: 1, + args: int32(args), + pcsp: 1, + nfuncdata: 2, + argptrs: uintptr(argptrs), + localptrs: uintptr(localptrs), + }} + + /* function table */ + tab := []_FuncTab{ + {entry: pc}, + {entry: pc}, + {entry: maxpc}, + } + + /* module data */ + *mod = _ModuleData{ + pcHeader: modHeader, + funcnametab: append(append([]byte{0}, name...), 0), + pctab: append(makePCtab(fp), encodeVariant(int(size))...), + pclntable: lnt, + ftab: tab, + findfunctab: findFuncTab, + minpc: minpc, + maxpc: maxpc, + modulename: name, + gcdata: uintptr(unsafe.Pointer(&emptyByte)), + gcbss: uintptr(unsafe.Pointer(&emptyByte)), + } + + /* verify and register the new module */ + moduledataverify1(mod) + registerModule(mod) +} diff --git a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go118.go b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go118.go index f1d585d97..efb847533 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go118.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go118.go @@ -1,3 +1,4 @@ +//go:build go1.18 && !go1.20 // +build go1.18,!go1.20 /* @@ -19,9 +20,9 @@ package loader import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) // A FuncFlag holds bits about a function. 
@@ -29,173 +30,170 @@ import ( type funcFlag uint8 type _Func struct { - entryOff uint32 // start pc - nameoff int32 // function name - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - flag funcFlag - _ [1]byte // pad - nfuncdata uint8 // must be last - argptrs uint32 - localptrs uint32 + entryOff uint32 // start pc + nameoff int32 // function name + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + flag funcFlag + _ [1]byte // pad + nfuncdata uint8 // must be last + argptrs uint32 + localptrs uint32 } type _FuncTab struct { - entry uint32 - funcoff uint32 + entry uint32 + funcoff uint32 } type _PCHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type _BitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type _PtabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type _TextSection struct { - vaddr uintptr // prelinked section vaddr - length uintptr // section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + length uintptr // section length + baseaddr uintptr // relocated section address } type _ModuleData struct { - pcHeader *_PCHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []_FuncTab - findfunctab *_FindFuncBucket - minpc, maxpc uintptr - text, etext uintptr - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - rodata uintptr - gofunc uintptr - textsectmap []_TextSection - typelinks []int32 - itablinks []unsafe.Pointer - ptab []_PtabEntry - pluginpath string - pkghashes []struct{} - 
modulename string - modulehashes []struct{} - hasmain uint8 - gcdatamask, gcbssmask _BitVector - typemap map[int32]unsafe.Pointer - bad bool - next *_ModuleData + pcHeader *_PCHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []_FuncTab + findfunctab *_FindFuncBucket + minpc, maxpc uintptr + text, etext uintptr + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr + rodata uintptr + gofunc uintptr + textsectmap []_TextSection + typelinks []int32 + itablinks []unsafe.Pointer + ptab []_PtabEntry + pluginpath string + pkghashes []struct{} + modulename string + modulehashes []struct{} + hasmain uint8 + gcdatamask, gcbssmask _BitVector + typemap map[int32]unsafe.Pointer + bad bool + next *_ModuleData } - type _FindFuncBucket struct { - idx uint32 - subbuckets [16]byte + idx uint32 + subbuckets [16]byte } - - func makePCtab(fp int) []byte { - return append([]byte{0}, encodeVariant((fp + 1) << 1)...) + return append([]byte{0}, encodeVariant((fp+1)<<1)...) } func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argPtrs []bool, localPtrs []bool) { - mod := new(_ModuleData) - - minpc := pc - maxpc := pc + size - - findFuncTab := make([]_FindFuncBucket, textSize/4096 + 1) - - modHeader := &_PCHeader { - magic : 0xfffffff0, - minLC : 1, - nfunc : 1, - ptrSize : 4 << (^uintptr(0) >> 63), - textStart: minpc, - } - - // cache arg and local stackmap - argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) - - base := argptrs - if argptrs > localptrs { - base = localptrs - } - - /* function entry */ - lnt := []_Func {{ - entryOff : 0, - nameoff : 1, - args : int32(args), - pcsp : 1, - nfuncdata : 2, - argptrs: uint32(argptrs - base), - localptrs: uint32(localptrs - base), - }} - nlnt := len(lnt)*int(unsafe.Sizeof(_Func{})) - plnt := unsafe.Pointer(&lnt[0]) - - /* function table */ - ftab := []_FuncTab { - {entry : 0, funcoff : 16}, - {entry : uint32(size)}, - } - nftab := len(ftab)*int(unsafe.Sizeof(_FuncTab{})) - pftab := unsafe.Pointer(&ftab[0]) - - pclntab := make([]byte, 0, nftab + nlnt) - pclntab = append(pclntab, rt.BytesFrom(pftab, nftab, nftab)...) - pclntab = append(pclntab, rt.BytesFrom(plnt, nlnt, nlnt)...) 
- - /* module data */ - *mod = _ModuleData { - pcHeader : modHeader, - funcnametab : append(append([]byte{0}, name...), 0), - pctab : append(makePCtab(fp), encodeVariant(int(size))...), - pclntable : pclntab, - ftab : ftab, - text : minpc, - etext : pc + textSize, - findfunctab : &findFuncTab[0], - minpc : minpc, - maxpc : maxpc, - modulename : name, - gcdata: uintptr(unsafe.Pointer(&emptyByte)), - gcbss: uintptr(unsafe.Pointer(&emptyByte)), - gofunc: base, - } - - /* verify and register the new module */ - moduledataverify1(mod) - registerModule(mod) -} \ No newline at end of file + mod := new(_ModuleData) + + minpc := pc + maxpc := pc + size + + findFuncTab := make([]_FindFuncBucket, textSize/4096+1) + + modHeader := &_PCHeader{ + magic: 0xfffffff0, + minLC: 1, + nfunc: 1, + ptrSize: 4 << (^uintptr(0) >> 63), + textStart: minpc, + } + + // cache arg and local stackmap + argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) + + base := argptrs + if argptrs > localptrs { + base = localptrs + } + + /* function entry */ + lnt := []_Func{{ + entryOff: 0, + nameoff: 1, + args: int32(args), + pcsp: 1, + nfuncdata: 2, + argptrs: uint32(argptrs - base), + localptrs: uint32(localptrs - base), + }} + nlnt := len(lnt) * int(unsafe.Sizeof(_Func{})) + plnt := unsafe.Pointer(&lnt[0]) + + /* function table */ + ftab := []_FuncTab{ + {entry: 0, funcoff: 16}, + {entry: uint32(size)}, + } + nftab := len(ftab) * int(unsafe.Sizeof(_FuncTab{})) + pftab := unsafe.Pointer(&ftab[0]) + + pclntab := make([]byte, 0, nftab+nlnt) + pclntab = append(pclntab, rt.BytesFrom(pftab, nftab, nftab)...) + pclntab = append(pclntab, rt.BytesFrom(plnt, nlnt, nlnt)...) + + /* module data */ + *mod = _ModuleData{ + pcHeader: modHeader, + funcnametab: append(append([]byte{0}, name...), 0), + pctab: append(makePCtab(fp), encodeVariant(int(size))...), + pclntable: pclntab, + ftab: ftab, + text: minpc, + etext: pc + textSize, + findfunctab: &findFuncTab[0], + minpc: minpc, + maxpc: maxpc, + modulename: name, + gcdata: uintptr(unsafe.Pointer(&emptyByte)), + gcbss: uintptr(unsafe.Pointer(&emptyByte)), + gofunc: base, + } + + /* verify and register the new module */ + moduledataverify1(mod) + registerModule(mod) +} diff --git a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go120.go b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go120.go index c12f8a73c..d69534938 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go120.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/funcdata_go120.go @@ -1,3 +1,4 @@ +//go:build go1.20 // +build go1.20 /* @@ -19,9 +20,9 @@ package loader import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) // A FuncFlag holds bits about a function. @@ -29,173 +30,170 @@ import ( type funcFlag uint8 type _Func struct { - entryOff uint32 // start pc - nameoff int32 // function name - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - flag funcFlag - _ [1]byte // pad - nfuncdata uint8 // must be last - argptrs uint32 - localptrs uint32 + entryOff uint32 // start pc + nameoff int32 // function name + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
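The go1.18 and go1.20 layouts reformatted here differ from the older ones in how addresses are stored: _Func.entryOff and the two stackmap fields become uint32 offsets rather than uintptr addresses, relative to the moduledata's text and gofunc bases respectively, which is why registerFunction picks the smaller of argptrs and localptrs as the base before subtracting. That rebasing logic, distilled into a hypothetical helper:

```go
package main

import "fmt"

// rebase mirrors the base selection in the go1.18+ registerFunction:
// take the lower of the two addresses as gofunc so that both offsets
// are non-negative and fit in uint32.
func rebase(argptrs, localptrs uintptr) (base uintptr, argOff, localOff uint32) {
	base = argptrs
	if argptrs > localptrs {
		base = localptrs
	}
	return base, uint32(argptrs - base), uint32(localptrs - base)
}

func main() {
	base, a, l := rebase(0x10000040, 0x10000000)
	fmt.Printf("gofunc=%#x argptrs=+%d localptrs=+%d\n", base, a, l)
}
```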
+ pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + flag funcFlag + _ [1]byte // pad + nfuncdata uint8 // must be last + argptrs uint32 + localptrs uint32 } type _FuncTab struct { - entry uint32 - funcoff uint32 + entry uint32 + funcoff uint32 } type _PCHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type _BitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type _PtabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type _TextSection struct { - vaddr uintptr // prelinked section vaddr - length uintptr // section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + length uintptr // section length + baseaddr uintptr // relocated section address } type _ModuleData struct { - pcHeader *_PCHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []_FuncTab - findfunctab *_FindFuncBucket - minpc, maxpc uintptr - text, etext uintptr - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - rodata uintptr - gofunc uintptr - textsectmap []_TextSection - typelinks []int32 - itablinks []unsafe.Pointer - ptab []_PtabEntry - pluginpath string - pkghashes []struct{} - modulename string - modulehashes []struct{} - hasmain uint8 - gcdatamask, gcbssmask _BitVector - typemap map[int32]unsafe.Pointer - bad bool - next *_ModuleData + pcHeader *_PCHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []_FuncTab + findfunctab *_FindFuncBucket + minpc, maxpc uintptr + text, etext uintptr + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr + rodata uintptr + gofunc uintptr + textsectmap []_TextSection + typelinks []int32 + itablinks []unsafe.Pointer + ptab []_PtabEntry + pluginpath string + pkghashes []struct{} + modulename string + modulehashes []struct{} + 
hasmain uint8 + gcdatamask, gcbssmask _BitVector + typemap map[int32]unsafe.Pointer + bad bool + next *_ModuleData } - type _FindFuncBucket struct { - idx uint32 - subbuckets [16]byte + idx uint32 + subbuckets [16]byte } - - func makePCtab(fp int) []byte { - return append([]byte{0}, encodeVariant((fp + 1) << 1)...) + return append([]byte{0}, encodeVariant((fp+1)<<1)...) } func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argPtrs []bool, localPtrs []bool) { - mod := new(_ModuleData) - - minpc := pc - maxpc := pc + size - - findFuncTab := make([]_FindFuncBucket, textSize/4096 + 1) - - modHeader := &_PCHeader { - magic : 0xfffffff0, - minLC : 1, - nfunc : 1, - ptrSize : 4 << (^uintptr(0) >> 63), - textStart: minpc, - } - - // cache arg and local stackmap - argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) - - base := argptrs - if argptrs > localptrs { - base = localptrs - } - - /* function entry */ - lnt := []_Func {{ - entryOff : 0, - nameoff : 1, - args : int32(args), - pcsp : 1, - nfuncdata : 2, - argptrs: uint32(argptrs - base), - localptrs: uint32(localptrs - base), - }} - nlnt := len(lnt)*int(unsafe.Sizeof(_Func{})) - plnt := unsafe.Pointer(&lnt[0]) - - /* function table */ - ftab := []_FuncTab { - {entry : 0, funcoff : 16}, - {entry : uint32(size)}, - } - nftab := len(ftab)*int(unsafe.Sizeof(_FuncTab{})) - pftab := unsafe.Pointer(&ftab[0]) - - pclntab := make([]byte, 0, nftab + nlnt) - pclntab = append(pclntab, rt.BytesFrom(pftab, nftab, nftab)...) - pclntab = append(pclntab, rt.BytesFrom(plnt, nlnt, nlnt)...) - - /* module data */ - *mod = _ModuleData { - pcHeader : modHeader, - funcnametab : append(append([]byte{0}, name...), 0), - pctab : append(makePCtab(fp), encodeVariant(int(size))...), - pclntable : pclntab, - ftab : ftab, - text : minpc, - etext : pc + textSize, - findfunctab : &findFuncTab[0], - minpc : minpc, - maxpc : maxpc, - modulename : name, - gcdata: uintptr(unsafe.Pointer(&emptyByte)), - gcbss: uintptr(unsafe.Pointer(&emptyByte)), - gofunc: base, - } - - /* verify and register the new module */ - moduledataverify1(mod) - registerModule(mod) -} \ No newline at end of file + mod := new(_ModuleData) + + minpc := pc + maxpc := pc + size + + findFuncTab := make([]_FindFuncBucket, textSize/4096+1) + + modHeader := &_PCHeader{ + magic: 0xfffffff0, + minLC: 1, + nfunc: 1, + ptrSize: 4 << (^uintptr(0) >> 63), + textStart: minpc, + } + + // cache arg and local stackmap + argptrs, localptrs := cacheStackmap(argPtrs, localPtrs, mod) + + base := argptrs + if argptrs > localptrs { + base = localptrs + } + + /* function entry */ + lnt := []_Func{{ + entryOff: 0, + nameoff: 1, + args: int32(args), + pcsp: 1, + nfuncdata: 2, + argptrs: uint32(argptrs - base), + localptrs: uint32(localptrs - base), + }} + nlnt := len(lnt) * int(unsafe.Sizeof(_Func{})) + plnt := unsafe.Pointer(&lnt[0]) + + /* function table */ + ftab := []_FuncTab{ + {entry: 0, funcoff: 16}, + {entry: uint32(size)}, + } + nftab := len(ftab) * int(unsafe.Sizeof(_FuncTab{})) + pftab := unsafe.Pointer(&ftab[0]) + + pclntab := make([]byte, 0, nftab+nlnt) + pclntab = append(pclntab, rt.BytesFrom(pftab, nftab, nftab)...) + pclntab = append(pclntab, rt.BytesFrom(plnt, nlnt, nlnt)...) 
+ + /* module data */ + *mod = _ModuleData{ + pcHeader: modHeader, + funcnametab: append(append([]byte{0}, name...), 0), + pctab: append(makePCtab(fp), encodeVariant(int(size))...), + pclntable: pclntab, + ftab: ftab, + text: minpc, + etext: pc + textSize, + findfunctab: &findFuncTab[0], + minpc: minpc, + maxpc: maxpc, + modulename: name, + gcdata: uintptr(unsafe.Pointer(&emptyByte)), + gcbss: uintptr(unsafe.Pointer(&emptyByte)), + gofunc: base, + } + + /* verify and register the new module */ + moduledataverify1(mod) + registerModule(mod) +} diff --git a/vendor/github.com/bytedance/sonic/internal/loader/loader.go b/vendor/github.com/bytedance/sonic/internal/loader/loader.go index 6446a5f07..96d5d2059 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/loader.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/loader.go @@ -20,55 +20,55 @@ package loader import ( - `fmt` - `os` - `reflect` - `syscall` - `unsafe` + "fmt" + "os" + "reflect" + "syscall" + "unsafe" ) const ( - _AP = syscall.MAP_ANON | syscall.MAP_PRIVATE - _RX = syscall.PROT_READ | syscall.PROT_EXEC - _RW = syscall.PROT_READ | syscall.PROT_WRITE + _AP = syscall.MAP_ANON | syscall.MAP_PRIVATE + _RX = syscall.PROT_READ | syscall.PROT_EXEC + _RW = syscall.PROT_READ | syscall.PROT_WRITE ) -type Loader []byte +type Loader []byte type Function unsafe.Pointer func (self Loader) Load(fn string, fp int, args int, argPtrs []bool, localPtrs []bool) (f Function) { - p := os.Getpagesize() - n := (((len(self) - 1) / p) + 1) * p + p := os.Getpagesize() + n := (((len(self) - 1) / p) + 1) * p - /* register the function */ - m := mmap(n) - v := fmt.Sprintf("runtime.__%s_%x", fn, m) - - registerFunction(v, m, uintptr(n), fp, args, uintptr(len(self)), argPtrs, localPtrs) + /* register the function */ + m := mmap(n) + v := fmt.Sprintf("runtime.__%s_%x", fn, m) - /* reference as a slice */ - s := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader { - Data : m, - Cap : n, - Len : len(self), - })) + registerFunction(v, m, uintptr(n), fp, args, uintptr(len(self)), argPtrs, localPtrs) - /* copy the machine code, and make it executable */ - copy(s, self) - mprotect(m, n) - return Function(&m) + /* reference as a slice */ + s := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: m, + Cap: n, + Len: len(self), + })) + + /* copy the machine code, and make it executable */ + copy(s, self) + mprotect(m, n) + return Function(&m) } func mmap(nb int) uintptr { - if m, _, e := syscall.RawSyscall6(syscall.SYS_MMAP, 0, uintptr(nb), _RW, _AP, 0, 0); e != 0 { - panic(e) - } else { - return m - } + if m, _, e := syscall.RawSyscall6(syscall.SYS_MMAP, 0, uintptr(nb), _RW, _AP, 0, 0); e != 0 { + panic(e) + } else { + return m + } } func mprotect(p uintptr, nb int) { - if _, _, err := syscall.RawSyscall(syscall.SYS_MPROTECT, p, uintptr(nb), _RX); err != 0 { - panic(err) - } + if _, _, err := syscall.RawSyscall(syscall.SYS_MPROTECT, p, uintptr(nb), _RX); err != 0 { + panic(err) + } } diff --git a/vendor/github.com/bytedance/sonic/internal/loader/loader_windows.go b/vendor/github.com/bytedance/sonic/internal/loader/loader_windows.go index 4053ee9bb..396601c41 100644 --- a/vendor/github.com/bytedance/sonic/internal/loader/loader_windows.go +++ b/vendor/github.com/bytedance/sonic/internal/loader/loader_windows.go @@ -20,92 +20,92 @@ package loader import ( - `fmt` - `os` - `reflect` - `syscall` - `unsafe` + "fmt" + "os" + "reflect" + "syscall" + "unsafe" ) const ( - MEM_COMMIT = 0x00001000 - MEM_RESERVE = 0x00002000 + MEM_COMMIT = 0x00001000 + MEM_RESERVE 
= 0x00002000 ) var ( - libKernel32 = syscall.NewLazyDLL("KERNEL32.DLL") - libKernel32_VirtualAlloc = libKernel32.NewProc("VirtualAlloc") - libKernel32_VirtualProtect = libKernel32.NewProc("VirtualProtect") + libKernel32 = syscall.NewLazyDLL("KERNEL32.DLL") + libKernel32_VirtualAlloc = libKernel32.NewProc("VirtualAlloc") + libKernel32_VirtualProtect = libKernel32.NewProc("VirtualProtect") ) -type Loader []byte +type Loader []byte type Function unsafe.Pointer func (self Loader) Load(fn string, fp int, args int, argPtrs []bool, localPtrs []bool) (f Function) { - p := os.Getpagesize() - n := (((len(self) - 1) / p) + 1) * p + p := os.Getpagesize() + n := (((len(self) - 1) / p) + 1) * p - /* register the function */ - m := mmap(n) - v := fmt.Sprintf("runtime.__%s_%x", fn, m) - - registerFunction(v, m, uintptr(n), fp, args, uintptr(len(self)), argPtrs, localPtrs) + /* register the function */ + m := mmap(n) + v := fmt.Sprintf("runtime.__%s_%x", fn, m) - /* reference as a slice */ - s := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader { - Data : m, - Cap : n, - Len : len(self), - })) + registerFunction(v, m, uintptr(n), fp, args, uintptr(len(self)), argPtrs, localPtrs) - /* copy the machine code, and make it executable */ - copy(s, self) - mprotect(m, n) - return Function(&m) + /* reference as a slice */ + s := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: m, + Cap: n, + Len: len(self), + })) + + /* copy the machine code, and make it executable */ + copy(s, self) + mprotect(m, n) + return Function(&m) } func mmap(nb int) uintptr { - addr, err := winapi_VirtualAlloc(0, nb, MEM_COMMIT|MEM_RESERVE, syscall.PAGE_READWRITE) - if err != nil { - panic(err) - } - return addr + addr, err := winapi_VirtualAlloc(0, nb, MEM_COMMIT|MEM_RESERVE, syscall.PAGE_READWRITE) + if err != nil { + panic(err) + } + return addr } func mprotect(p uintptr, nb int) (oldProtect int) { - err := winapi_VirtualProtect(p, nb, syscall.PAGE_EXECUTE_READ, &oldProtect) - if err != nil { - panic(err) - } - return + err := winapi_VirtualProtect(p, nb, syscall.PAGE_EXECUTE_READ, &oldProtect) + if err != nil { + panic(err) + } + return } // winapi_VirtualAlloc allocate memory // Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc func winapi_VirtualAlloc(lpAddr uintptr, dwSize int, flAllocationType int, flProtect int) (uintptr, error) { - r1, _, err := libKernel32_VirtualAlloc.Call( - lpAddr, - uintptr(dwSize), - uintptr(flAllocationType), - uintptr(flProtect), - ) - if r1 == 0 { - return 0, err - } - return r1, nil + r1, _, err := libKernel32_VirtualAlloc.Call( + lpAddr, + uintptr(dwSize), + uintptr(flAllocationType), + uintptr(flProtect), + ) + if r1 == 0 { + return 0, err + } + return r1, nil } // winapi_VirtualProtect change memory protection // Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualprotect func winapi_VirtualProtect(lpAddr uintptr, dwSize int, flNewProtect int, lpflOldProtect *int) error { - r1, _, err := libKernel32_VirtualProtect.Call( - lpAddr, - uintptr(dwSize), - uintptr(flNewProtect), - uintptr(unsafe.Pointer(lpflOldProtect)), - ) - if r1 == 0 { - return err - } - return nil + r1, _, err := libKernel32_VirtualProtect.Call( + lpAddr, + uintptr(dwSize), + uintptr(flNewProtect), + uintptr(unsafe.Pointer(lpflOldProtect)), + ) + if r1 == 0 { + return err + } + return nil } diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.go index 
d6a861068..e3d6f3360 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.go @@ -19,9 +19,9 @@ package avx import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" ) //go:nosplit @@ -132,4 +132,4 @@ func __validate_utf8(s *string, p *int, m *types.StateMachine) (ret int) //go:nosplit //go:noescape //goland:noinspection GoUnusedParameter -func __validate_utf8_fast(s *string) (ret int) \ No newline at end of file +func __validate_utf8_fast(s *string) (ret int) diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx/native_export_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx/native_export_amd64.go index 1ca7f5753..e25111feb 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx/native_export_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx/native_export_amd64.go @@ -19,31 +19,31 @@ package avx var ( - S_f64toa = _subr__f64toa - S_f32toa = _subr__f32toa - S_i64toa = _subr__i64toa - S_u64toa = _subr__u64toa - S_lspace = _subr__lspace + S_f64toa = _subr__f64toa + S_f32toa = _subr__f32toa + S_i64toa = _subr__i64toa + S_u64toa = _subr__u64toa + S_lspace = _subr__lspace ) var ( - S_quote = _subr__quote - S_unquote = _subr__unquote + S_quote = _subr__quote + S_unquote = _subr__unquote ) var ( - S_value = _subr__value - S_vstring = _subr__vstring - S_vnumber = _subr__vnumber - S_vsigned = _subr__vsigned - S_vunsigned = _subr__vunsigned + S_value = _subr__value + S_vstring = _subr__vstring + S_vnumber = _subr__vnumber + S_vsigned = _subr__vsigned + S_vunsigned = _subr__vunsigned ) var ( - S_skip_one = _subr__skip_one - S_skip_one_fast = _subr__skip_one_fast - S_skip_array = _subr__skip_array - S_skip_object = _subr__skip_object - S_skip_number = _subr__skip_number - S_get_by_path = _subr__get_by_path + S_skip_one = _subr__skip_one + S_skip_one_fast = _subr__skip_one_fast + S_skip_array = _subr__skip_array + S_skip_object = _subr__skip_object + S_skip_number = _subr__skip_number + S_get_by_path = _subr__get_by_path ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx/native_subr_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx/native_subr_amd64.go index f20095d3d..f764fd47b 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx/native_subr_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx/native_subr_amd64.go @@ -1,4 +1,6 @@ +//go:build !noasm || !appengine // +build !noasm !appengine + // Code generated by asm2asm, DO NOT EDIT. 
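Stepping back to the loader.go and loader_windows.go hunks just above: both follow the classic JIT loading pattern, allocate writable pages, round the code size up to a whole page, copy the machine code in, then flip the pages to read+execute. sonic does this with raw SYS_MMAP/SYS_MPROTECT syscalls (VirtualAlloc/VirtualProtect on Windows); the same flow through Go's portable syscall wrappers, as a sketch rather than sonic's actual implementation:

```go
//go:build linux || darwin

package main

import (
	"fmt"
	"os"
	"syscall"
)

// loadRX maps anonymous RW pages, copies code in, then switches the
// mapping to RX: the W^X dance from the vendored Loader.Load, via
// syscall.Mmap/Mprotect instead of raw syscalls.
func loadRX(code []byte) ([]byte, error) {
	p := os.Getpagesize()
	n := (((len(code) - 1) / p) + 1) * p // round up to page size
	mem, err := syscall.Mmap(-1, 0, n,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		return nil, err
	}
	copy(mem, code)
	if err := syscall.Mprotect(mem, syscall.PROT_READ|syscall.PROT_EXEC); err != nil {
		return nil, err
	}
	return mem, nil
}

func main() {
	mem, err := loadRX([]byte{0xc3}) // x86-64 RET
	if err != nil {
		panic(err)
	}
	fmt.Printf("code mapped RX at %p\n", &mem[0])
}
```

The native_subr tables in the surrounding hunks are a different mechanism: each _subr__* address is __native_entry__() plus a fixed byte offset into text assembled directly into the binary, and the blocks of `_ = ...` assignments presumably exist just to keep every symbol referenced.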
package avx @@ -9,101 +11,101 @@ package avx func __native_entry__() uintptr var ( - _subr__f32toa = __native_entry__() + 31264 - _subr__f64toa = __native_entry__() + 192 - _subr__get_by_path = __native_entry__() + 25856 - _subr__html_escape = __native_entry__() + 9040 - _subr__i64toa = __native_entry__() + 3488 - _subr__lspace = __native_entry__() + 16 - _subr__quote = __native_entry__() + 4880 - _subr__skip_array = __native_entry__() + 17952 - _subr__skip_number = __native_entry__() + 21952 - _subr__skip_object = __native_entry__() + 20368 - _subr__skip_one = __native_entry__() + 22112 - _subr__skip_one_fast = __native_entry__() + 22352 - _subr__u64toa = __native_entry__() + 3600 - _subr__unquote = __native_entry__() + 6672 - _subr__validate_one = __native_entry__() + 22176 - _subr__validate_utf8 = __native_entry__() + 30000 - _subr__validate_utf8_fast = __native_entry__() + 30672 - _subr__value = __native_entry__() + 12224 - _subr__vnumber = __native_entry__() + 15616 - _subr__vsigned = __native_entry__() + 17232 - _subr__vstring = __native_entry__() + 14064 - _subr__vunsigned = __native_entry__() + 17600 + _subr__f32toa = __native_entry__() + 31264 + _subr__f64toa = __native_entry__() + 192 + _subr__get_by_path = __native_entry__() + 25856 + _subr__html_escape = __native_entry__() + 9040 + _subr__i64toa = __native_entry__() + 3488 + _subr__lspace = __native_entry__() + 16 + _subr__quote = __native_entry__() + 4880 + _subr__skip_array = __native_entry__() + 17952 + _subr__skip_number = __native_entry__() + 21952 + _subr__skip_object = __native_entry__() + 20368 + _subr__skip_one = __native_entry__() + 22112 + _subr__skip_one_fast = __native_entry__() + 22352 + _subr__u64toa = __native_entry__() + 3600 + _subr__unquote = __native_entry__() + 6672 + _subr__validate_one = __native_entry__() + 22176 + _subr__validate_utf8 = __native_entry__() + 30000 + _subr__validate_utf8_fast = __native_entry__() + 30672 + _subr__value = __native_entry__() + 12224 + _subr__vnumber = __native_entry__() + 15616 + _subr__vsigned = __native_entry__() + 17232 + _subr__vstring = __native_entry__() + 14064 + _subr__vunsigned = __native_entry__() + 17600 ) const ( - _stack__f32toa = 48 - _stack__f64toa = 80 - _stack__get_by_path = 304 - _stack__html_escape = 64 - _stack__i64toa = 16 - _stack__lspace = 8 - _stack__quote = 56 - _stack__skip_array = 128 - _stack__skip_number = 72 - _stack__skip_object = 128 - _stack__skip_one = 128 - _stack__skip_one_fast = 200 - _stack__u64toa = 8 - _stack__unquote = 88 - _stack__validate_one = 128 - _stack__validate_utf8 = 48 - _stack__validate_utf8_fast = 24 - _stack__value = 328 - _stack__vnumber = 240 - _stack__vsigned = 16 - _stack__vstring = 136 - _stack__vunsigned = 16 + _stack__f32toa = 48 + _stack__f64toa = 80 + _stack__get_by_path = 304 + _stack__html_escape = 64 + _stack__i64toa = 16 + _stack__lspace = 8 + _stack__quote = 56 + _stack__skip_array = 128 + _stack__skip_number = 72 + _stack__skip_object = 128 + _stack__skip_one = 128 + _stack__skip_one_fast = 200 + _stack__u64toa = 8 + _stack__unquote = 88 + _stack__validate_one = 128 + _stack__validate_utf8 = 48 + _stack__validate_utf8_fast = 24 + _stack__value = 328 + _stack__vnumber = 240 + _stack__vsigned = 16 + _stack__vstring = 136 + _stack__vunsigned = 16 ) var ( - _ = _subr__f32toa - _ = _subr__f64toa - _ = _subr__get_by_path - _ = _subr__html_escape - _ = _subr__i64toa - _ = _subr__lspace - _ = _subr__quote - _ = _subr__skip_array - _ = _subr__skip_number - _ = _subr__skip_object - _ = _subr__skip_one - _ = 
_subr__skip_one_fast - _ = _subr__u64toa - _ = _subr__unquote - _ = _subr__validate_one - _ = _subr__validate_utf8 - _ = _subr__validate_utf8_fast - _ = _subr__value - _ = _subr__vnumber - _ = _subr__vsigned - _ = _subr__vstring - _ = _subr__vunsigned + _ = _subr__f32toa + _ = _subr__f64toa + _ = _subr__get_by_path + _ = _subr__html_escape + _ = _subr__i64toa + _ = _subr__lspace + _ = _subr__quote + _ = _subr__skip_array + _ = _subr__skip_number + _ = _subr__skip_object + _ = _subr__skip_one + _ = _subr__skip_one_fast + _ = _subr__u64toa + _ = _subr__unquote + _ = _subr__validate_one + _ = _subr__validate_utf8 + _ = _subr__validate_utf8_fast + _ = _subr__value + _ = _subr__vnumber + _ = _subr__vsigned + _ = _subr__vstring + _ = _subr__vunsigned ) const ( - _ = _stack__f32toa - _ = _stack__f64toa - _ = _stack__get_by_path - _ = _stack__html_escape - _ = _stack__i64toa - _ = _stack__lspace - _ = _stack__quote - _ = _stack__skip_array - _ = _stack__skip_number - _ = _stack__skip_object - _ = _stack__skip_one - _ = _stack__skip_one_fast - _ = _stack__u64toa - _ = _stack__unquote - _ = _stack__validate_one - _ = _stack__validate_utf8 - _ = _stack__validate_utf8_fast - _ = _stack__value - _ = _stack__vnumber - _ = _stack__vsigned - _ = _stack__vstring - _ = _stack__vunsigned + _ = _stack__f32toa + _ = _stack__f64toa + _ = _stack__get_by_path + _ = _stack__html_escape + _ = _stack__i64toa + _ = _stack__lspace + _ = _stack__quote + _ = _stack__skip_array + _ = _stack__skip_number + _ = _stack__skip_object + _ = _stack__skip_one + _ = _stack__skip_one_fast + _ = _stack__u64toa + _ = _stack__unquote + _ = _stack__validate_one + _ = _stack__validate_utf8 + _ = _stack__validate_utf8_fast + _ = _stack__value + _ = _stack__vnumber + _ = _stack__vsigned + _ = _stack__vstring + _ = _stack__vunsigned ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.go index bebd83c80..67d5d2b76 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.go @@ -19,9 +19,9 @@ package avx2 import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" ) //go:nosplit @@ -132,4 +132,4 @@ func __validate_utf8(s *string, p *int, m *types.StateMachine) (ret int) //go:nosplit //go:noescape //goland:noinspection GoUnusedParameter -func __validate_utf8_fast(s *string) (ret int) \ No newline at end of file +func __validate_utf8_fast(s *string) (ret int) diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_export_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_export_amd64.go index 8adf5e91f..61474a615 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_export_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_export_amd64.go @@ -19,31 +19,31 @@ package avx2 var ( - S_f64toa = _subr__f64toa - S_f32toa = _subr__f32toa - S_i64toa = _subr__i64toa - S_u64toa = _subr__u64toa - S_lspace = _subr__lspace + S_f64toa = _subr__f64toa + S_f32toa = _subr__f32toa + S_i64toa = _subr__i64toa + S_u64toa = _subr__u64toa + S_lspace = _subr__lspace ) var ( - S_quote = _subr__quote - S_unquote = _subr__unquote + S_quote = _subr__quote + S_unquote = _subr__unquote ) var ( - S_value = _subr__value - S_vstring = _subr__vstring - S_vnumber = _subr__vnumber - S_vsigned = _subr__vsigned - 
S_vunsigned = _subr__vunsigned + S_value = _subr__value + S_vstring = _subr__vstring + S_vnumber = _subr__vnumber + S_vsigned = _subr__vsigned + S_vunsigned = _subr__vunsigned ) var ( - S_skip_one = _subr__skip_one - S_skip_one_fast = _subr__skip_one_fast - S_skip_array = _subr__skip_array - S_skip_object = _subr__skip_object - S_skip_number = _subr__skip_number - S_get_by_path = _subr__get_by_path + S_skip_one = _subr__skip_one + S_skip_one_fast = _subr__skip_one_fast + S_skip_array = _subr__skip_array + S_skip_object = _subr__skip_object + S_skip_number = _subr__skip_number + S_get_by_path = _subr__get_by_path ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_subr_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_subr_amd64.go index 10b3d60f5..ca54ce813 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/avx2/native_subr_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/avx2/native_subr_amd64.go @@ -1,4 +1,6 @@ +//go:build !noasm || !appengine // +build !noasm !appengine + // Code generated by asm2asm, DO NOT EDIT. package avx2 @@ -9,101 +11,101 @@ package avx2 func __native_entry__() uintptr var ( - _subr__f32toa = __native_entry__() + 33888 - _subr__f64toa = __native_entry__() + 288 - _subr__get_by_path = __native_entry__() + 28336 - _subr__html_escape = __native_entry__() + 10496 - _subr__i64toa = __native_entry__() + 3584 - _subr__lspace = __native_entry__() + 64 - _subr__quote = __native_entry__() + 5072 - _subr__skip_array = __native_entry__() + 20688 - _subr__skip_number = __native_entry__() + 24912 - _subr__skip_object = __native_entry__() + 22736 - _subr__skip_one = __native_entry__() + 25072 - _subr__skip_one_fast = __native_entry__() + 25488 - _subr__u64toa = __native_entry__() + 3696 - _subr__unquote = __native_entry__() + 7888 - _subr__validate_one = __native_entry__() + 25136 - _subr__validate_utf8 = __native_entry__() + 30320 - _subr__validate_utf8_fast = __native_entry__() + 31280 - _subr__value = __native_entry__() + 15024 - _subr__vnumber = __native_entry__() + 18352 - _subr__vsigned = __native_entry__() + 19968 - _subr__vstring = __native_entry__() + 17024 - _subr__vunsigned = __native_entry__() + 20336 + _subr__f32toa = __native_entry__() + 33888 + _subr__f64toa = __native_entry__() + 288 + _subr__get_by_path = __native_entry__() + 28336 + _subr__html_escape = __native_entry__() + 10496 + _subr__i64toa = __native_entry__() + 3584 + _subr__lspace = __native_entry__() + 64 + _subr__quote = __native_entry__() + 5072 + _subr__skip_array = __native_entry__() + 20688 + _subr__skip_number = __native_entry__() + 24912 + _subr__skip_object = __native_entry__() + 22736 + _subr__skip_one = __native_entry__() + 25072 + _subr__skip_one_fast = __native_entry__() + 25488 + _subr__u64toa = __native_entry__() + 3696 + _subr__unquote = __native_entry__() + 7888 + _subr__validate_one = __native_entry__() + 25136 + _subr__validate_utf8 = __native_entry__() + 30320 + _subr__validate_utf8_fast = __native_entry__() + 31280 + _subr__value = __native_entry__() + 15024 + _subr__vnumber = __native_entry__() + 18352 + _subr__vsigned = __native_entry__() + 19968 + _subr__vstring = __native_entry__() + 17024 + _subr__vunsigned = __native_entry__() + 20336 ) const ( - _stack__f32toa = 48 - _stack__f64toa = 80 - _stack__get_by_path = 296 - _stack__html_escape = 72 - _stack__i64toa = 16 - _stack__lspace = 8 - _stack__quote = 56 - _stack__skip_array = 128 - _stack__skip_number = 72 - _stack__skip_object = 128 - 
_stack__skip_one = 128 - _stack__skip_one_fast = 208 - _stack__u64toa = 8 - _stack__unquote = 72 - _stack__validate_one = 128 - _stack__validate_utf8 = 48 - _stack__validate_utf8_fast = 176 - _stack__value = 328 - _stack__vnumber = 240 - _stack__vsigned = 16 - _stack__vstring = 112 - _stack__vunsigned = 16 + _stack__f32toa = 48 + _stack__f64toa = 80 + _stack__get_by_path = 296 + _stack__html_escape = 72 + _stack__i64toa = 16 + _stack__lspace = 8 + _stack__quote = 56 + _stack__skip_array = 128 + _stack__skip_number = 72 + _stack__skip_object = 128 + _stack__skip_one = 128 + _stack__skip_one_fast = 208 + _stack__u64toa = 8 + _stack__unquote = 72 + _stack__validate_one = 128 + _stack__validate_utf8 = 48 + _stack__validate_utf8_fast = 176 + _stack__value = 328 + _stack__vnumber = 240 + _stack__vsigned = 16 + _stack__vstring = 112 + _stack__vunsigned = 16 ) var ( - _ = _subr__f32toa - _ = _subr__f64toa - _ = _subr__get_by_path - _ = _subr__html_escape - _ = _subr__i64toa - _ = _subr__lspace - _ = _subr__quote - _ = _subr__skip_array - _ = _subr__skip_number - _ = _subr__skip_object - _ = _subr__skip_one - _ = _subr__skip_one_fast - _ = _subr__u64toa - _ = _subr__unquote - _ = _subr__validate_one - _ = _subr__validate_utf8 - _ = _subr__validate_utf8_fast - _ = _subr__value - _ = _subr__vnumber - _ = _subr__vsigned - _ = _subr__vstring - _ = _subr__vunsigned + _ = _subr__f32toa + _ = _subr__f64toa + _ = _subr__get_by_path + _ = _subr__html_escape + _ = _subr__i64toa + _ = _subr__lspace + _ = _subr__quote + _ = _subr__skip_array + _ = _subr__skip_number + _ = _subr__skip_object + _ = _subr__skip_one + _ = _subr__skip_one_fast + _ = _subr__u64toa + _ = _subr__unquote + _ = _subr__validate_one + _ = _subr__validate_utf8 + _ = _subr__validate_utf8_fast + _ = _subr__value + _ = _subr__vnumber + _ = _subr__vsigned + _ = _subr__vstring + _ = _subr__vunsigned ) const ( - _ = _stack__f32toa - _ = _stack__f64toa - _ = _stack__get_by_path - _ = _stack__html_escape - _ = _stack__i64toa - _ = _stack__lspace - _ = _stack__quote - _ = _stack__skip_array - _ = _stack__skip_number - _ = _stack__skip_object - _ = _stack__skip_one - _ = _stack__skip_one_fast - _ = _stack__u64toa - _ = _stack__unquote - _ = _stack__validate_one - _ = _stack__validate_utf8 - _ = _stack__validate_utf8_fast - _ = _stack__value - _ = _stack__vnumber - _ = _stack__vsigned - _ = _stack__vstring - _ = _stack__vunsigned + _ = _stack__f32toa + _ = _stack__f64toa + _ = _stack__get_by_path + _ = _stack__html_escape + _ = _stack__i64toa + _ = _stack__lspace + _ = _stack__quote + _ = _stack__skip_array + _ = _stack__skip_number + _ = _stack__skip_object + _ = _stack__skip_one + _ = _stack__skip_one_fast + _ = _stack__u64toa + _ = _stack__unquote + _ = _stack__validate_one + _ = _stack__validate_utf8 + _ = _stack__validate_utf8_fast + _ = _stack__value + _ = _stack__vnumber + _ = _stack__vsigned + _ = _stack__vstring + _ = _stack__vunsigned ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.go index 11e517231..7ab4901e7 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.go @@ -17,48 +17,48 @@ package native import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/cpu` - `github.com/bytedance/sonic/internal/native/avx` - `github.com/bytedance/sonic/internal/native/avx2` - `github.com/bytedance/sonic/internal/native/sse` - 
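Each `_subr__*` variable in the tables above is just the base address of the assembled blob plus a fixed byte offset emitted by asm2asm, so the realignment is purely cosmetic: the offsets travel with their names. The arithmetic in a standalone sketch, using a made-up base address and the real avx2 offsets for lspace and f64toa:

package main

import "fmt"

// nativeEntry stands in for __native_entry__(); the real one returns
// the load address of the assembled code blob.
func nativeEntry() uintptr { return 0x400000 } // hypothetical base

var (
	subrLspace = nativeEntry() + 64  // mirrors _subr__lspace
	subrF64toa = nativeEntry() + 288 // mirrors _subr__f64toa
)

func main() {
	fmt.Printf("lspace=%#x f64toa=%#x\n", subrLspace, subrF64toa)
}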
`github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/cpu" + "github.com/bytedance/sonic/internal/native/avx" + "github.com/bytedance/sonic/internal/native/avx2" + "github.com/bytedance/sonic/internal/native/sse" + "github.com/bytedance/sonic/internal/native/types" ) const ( - MaxFrameSize uintptr = 400 - BufPaddingSize int = 64 + MaxFrameSize uintptr = 400 + BufPaddingSize int = 64 ) var ( - S_f64toa uintptr - S_f32toa uintptr - S_i64toa uintptr - S_u64toa uintptr - S_lspace uintptr + S_f64toa uintptr + S_f32toa uintptr + S_i64toa uintptr + S_u64toa uintptr + S_lspace uintptr ) var ( - S_quote uintptr - S_unquote uintptr + S_quote uintptr + S_unquote uintptr ) var ( - S_value uintptr - S_vstring uintptr - S_vnumber uintptr - S_vsigned uintptr - S_vunsigned uintptr + S_value uintptr + S_vstring uintptr + S_vnumber uintptr + S_vsigned uintptr + S_vunsigned uintptr ) var ( - S_skip_one uintptr - S_skip_one_fast uintptr - S_get_by_path uintptr - S_skip_array uintptr - S_skip_object uintptr - S_skip_number uintptr + S_skip_one uintptr + S_skip_one_fast uintptr + S_get_by_path uintptr + S_skip_array uintptr + S_skip_object uintptr + S_skip_number uintptr ) //go:nosplit @@ -127,76 +127,76 @@ func ValidateUTF8(s *string, p *int, m *types.StateMachine) (ret int) func ValidateUTF8Fast(s *string) (ret int) func useAVX() { - S_f64toa = avx.S_f64toa - S_f32toa = avx.S_f32toa - S_i64toa = avx.S_i64toa - S_u64toa = avx.S_u64toa - S_lspace = avx.S_lspace - S_quote = avx.S_quote - S_unquote = avx.S_unquote - S_value = avx.S_value - S_vstring = avx.S_vstring - S_vnumber = avx.S_vnumber - S_vsigned = avx.S_vsigned - S_vunsigned = avx.S_vunsigned - S_skip_one = avx.S_skip_one - S_skip_one_fast = avx.S_skip_one_fast - S_skip_array = avx.S_skip_array - S_skip_object = avx.S_skip_object - S_skip_number = avx.S_skip_number - S_get_by_path = avx.S_get_by_path + S_f64toa = avx.S_f64toa + S_f32toa = avx.S_f32toa + S_i64toa = avx.S_i64toa + S_u64toa = avx.S_u64toa + S_lspace = avx.S_lspace + S_quote = avx.S_quote + S_unquote = avx.S_unquote + S_value = avx.S_value + S_vstring = avx.S_vstring + S_vnumber = avx.S_vnumber + S_vsigned = avx.S_vsigned + S_vunsigned = avx.S_vunsigned + S_skip_one = avx.S_skip_one + S_skip_one_fast = avx.S_skip_one_fast + S_skip_array = avx.S_skip_array + S_skip_object = avx.S_skip_object + S_skip_number = avx.S_skip_number + S_get_by_path = avx.S_get_by_path } func useAVX2() { - S_f64toa = avx2.S_f64toa - S_f32toa = avx2.S_f32toa - S_i64toa = avx2.S_i64toa - S_u64toa = avx2.S_u64toa - S_lspace = avx2.S_lspace - S_quote = avx2.S_quote - S_unquote = avx2.S_unquote - S_value = avx2.S_value - S_vstring = avx2.S_vstring - S_vnumber = avx2.S_vnumber - S_vsigned = avx2.S_vsigned - S_vunsigned = avx2.S_vunsigned - S_skip_one = avx2.S_skip_one - S_skip_one_fast = avx2.S_skip_one_fast - S_skip_array = avx2.S_skip_array - S_skip_object = avx2.S_skip_object - S_skip_number = avx2.S_skip_number - S_get_by_path = avx2.S_get_by_path + S_f64toa = avx2.S_f64toa + S_f32toa = avx2.S_f32toa + S_i64toa = avx2.S_i64toa + S_u64toa = avx2.S_u64toa + S_lspace = avx2.S_lspace + S_quote = avx2.S_quote + S_unquote = avx2.S_unquote + S_value = avx2.S_value + S_vstring = avx2.S_vstring + S_vnumber = avx2.S_vnumber + S_vsigned = avx2.S_vsigned + S_vunsigned = avx2.S_vunsigned + S_skip_one = avx2.S_skip_one + S_skip_one_fast = avx2.S_skip_one_fast + S_skip_array = avx2.S_skip_array + S_skip_object = avx2.S_skip_object + S_skip_number = avx2.S_skip_number + S_get_by_path = 
avx2.S_get_by_path } func useSSE() { - S_f64toa = sse.S_f64toa - S_f32toa = sse.S_f32toa - S_i64toa = sse.S_i64toa - S_u64toa = sse.S_u64toa - S_lspace = sse.S_lspace - S_quote = sse.S_quote - S_unquote = sse.S_unquote - S_value = sse.S_value - S_vstring = sse.S_vstring - S_vnumber = sse.S_vnumber - S_vsigned = sse.S_vsigned - S_vunsigned = sse.S_vunsigned - S_skip_one = sse.S_skip_one - S_skip_one_fast = sse.S_skip_one_fast - S_skip_array = sse.S_skip_array - S_skip_object = sse.S_skip_object - S_skip_number = sse.S_skip_number - S_get_by_path = sse.S_get_by_path + S_f64toa = sse.S_f64toa + S_f32toa = sse.S_f32toa + S_i64toa = sse.S_i64toa + S_u64toa = sse.S_u64toa + S_lspace = sse.S_lspace + S_quote = sse.S_quote + S_unquote = sse.S_unquote + S_value = sse.S_value + S_vstring = sse.S_vstring + S_vnumber = sse.S_vnumber + S_vsigned = sse.S_vsigned + S_vunsigned = sse.S_vunsigned + S_skip_one = sse.S_skip_one + S_skip_one_fast = sse.S_skip_one_fast + S_skip_array = sse.S_skip_array + S_skip_object = sse.S_skip_object + S_skip_number = sse.S_skip_number + S_get_by_path = sse.S_get_by_path } func init() { - if cpu.HasAVX2 { - useAVX2() - } else if cpu.HasAVX { - useAVX() - } else if cpu.HasSSE { - useSSE() - } else { - panic("Unsupported CPU, maybe it's too old to run Sonic.") - } + if cpu.HasAVX2 { + useAVX2() + } else if cpu.HasAVX { + useAVX() + } else if cpu.HasSSE { + useSSE() + } else { + panic("Unsupported CPU, maybe it's too old to run Sonic.") + } } diff --git a/vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.go index cb30a0ebf..797a8ee72 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.go @@ -19,9 +19,9 @@ package sse import ( - `unsafe` + "unsafe" - `github.com/bytedance/sonic/internal/native/types` + "github.com/bytedance/sonic/internal/native/types" ) //go:nosplit @@ -132,4 +132,4 @@ func __validate_utf8(s *string, p *int, m *types.StateMachine) (ret int) //go:nosplit //go:noescape //goland:noinspection GoUnusedParameter -func __validate_utf8_fast(s *string) (ret int) \ No newline at end of file +func __validate_utf8_fast(s *string) (ret int) diff --git a/vendor/github.com/bytedance/sonic/internal/native/sse/native_export_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/sse/native_export_amd64.go index 898bad431..9e370553a 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/sse/native_export_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/sse/native_export_amd64.go @@ -19,31 +19,31 @@ package sse var ( - S_f64toa = _subr__f64toa - S_f32toa = _subr__f32toa - S_i64toa = _subr__i64toa - S_u64toa = _subr__u64toa - S_lspace = _subr__lspace + S_f64toa = _subr__f64toa + S_f32toa = _subr__f32toa + S_i64toa = _subr__i64toa + S_u64toa = _subr__u64toa + S_lspace = _subr__lspace ) var ( - S_quote = _subr__quote - S_unquote = _subr__unquote + S_quote = _subr__quote + S_unquote = _subr__unquote ) var ( - S_value = _subr__value - S_vstring = _subr__vstring - S_vnumber = _subr__vnumber - S_vsigned = _subr__vsigned - S_vunsigned = _subr__vunsigned + S_value = _subr__value + S_vstring = _subr__vstring + S_vnumber = _subr__vnumber + S_vsigned = _subr__vsigned + S_vunsigned = _subr__vunsigned ) var ( - S_skip_one = _subr__skip_one - S_skip_one_fast = _subr__skip_one_fast - S_skip_array = _subr__skip_array - S_skip_object = _subr__skip_object - S_skip_number = 
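The reformatted init() above selects an implementation once at startup: AVX2 if the CPU supports it, then AVX, then SSE, else panic. A hedged sketch of the same probe-and-dispatch shape using the public golang.org/x/sys/cpu package in place of sonic's internal one (note it exposes HasSSE2 rather than a bare HasSSE flag):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// pickImpl mirrors init() above: probe ISA extensions once and bind
// the fastest supported kernel.
func pickImpl() string {
	switch {
	case cpu.X86.HasAVX2:
		return "avx2"
	case cpu.X86.HasAVX:
		return "avx"
	case cpu.X86.HasSSE2:
		return "sse"
	default:
		return "fallback" // the real init() panics here instead
	}
}

func main() {
	fmt.Println("selected:", pickImpl())
}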
_subr__skip_number - S_get_by_path = _subr__get_by_path + S_skip_one = _subr__skip_one + S_skip_one_fast = _subr__skip_one_fast + S_skip_array = _subr__skip_array + S_skip_object = _subr__skip_object + S_skip_number = _subr__skip_number + S_get_by_path = _subr__get_by_path ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/sse/native_subr_amd64.go b/vendor/github.com/bytedance/sonic/internal/native/sse/native_subr_amd64.go index 2682825ed..7c9d46674 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/sse/native_subr_amd64.go +++ b/vendor/github.com/bytedance/sonic/internal/native/sse/native_subr_amd64.go @@ -1,4 +1,6 @@ +//go:build !noasm || !appengine // +build !noasm !appengine + // Code generated by asm2asm, DO NOT EDIT. package sse @@ -9,101 +11,101 @@ package sse func __native_entry__() uintptr var ( - _subr__f32toa = __native_entry__() + 31760 - _subr__f64toa = __native_entry__() + 160 - _subr__get_by_path = __native_entry__() + 26384 - _subr__html_escape = __native_entry__() + 9072 - _subr__i64toa = __native_entry__() + 3424 - _subr__lspace = __native_entry__() + 16 - _subr__quote = __native_entry__() + 4864 - _subr__skip_array = __native_entry__() + 18112 - _subr__skip_number = __native_entry__() + 22128 - _subr__skip_object = __native_entry__() + 20512 - _subr__skip_one = __native_entry__() + 22288 - _subr__skip_one_fast = __native_entry__() + 22512 - _subr__u64toa = __native_entry__() + 3552 - _subr__unquote = __native_entry__() + 6704 - _subr__validate_one = __native_entry__() + 22336 - _subr__validate_utf8 = __native_entry__() + 30528 - _subr__validate_utf8_fast = __native_entry__() + 31200 - _subr__value = __native_entry__() + 12272 - _subr__vnumber = __native_entry__() + 15728 - _subr__vsigned = __native_entry__() + 17376 - _subr__vstring = __native_entry__() + 14112 - _subr__vunsigned = __native_entry__() + 17760 + _subr__f32toa = __native_entry__() + 31760 + _subr__f64toa = __native_entry__() + 160 + _subr__get_by_path = __native_entry__() + 26384 + _subr__html_escape = __native_entry__() + 9072 + _subr__i64toa = __native_entry__() + 3424 + _subr__lspace = __native_entry__() + 16 + _subr__quote = __native_entry__() + 4864 + _subr__skip_array = __native_entry__() + 18112 + _subr__skip_number = __native_entry__() + 22128 + _subr__skip_object = __native_entry__() + 20512 + _subr__skip_one = __native_entry__() + 22288 + _subr__skip_one_fast = __native_entry__() + 22512 + _subr__u64toa = __native_entry__() + 3552 + _subr__unquote = __native_entry__() + 6704 + _subr__validate_one = __native_entry__() + 22336 + _subr__validate_utf8 = __native_entry__() + 30528 + _subr__validate_utf8_fast = __native_entry__() + 31200 + _subr__value = __native_entry__() + 12272 + _subr__vnumber = __native_entry__() + 15728 + _subr__vsigned = __native_entry__() + 17376 + _subr__vstring = __native_entry__() + 14112 + _subr__vunsigned = __native_entry__() + 17760 ) const ( - _stack__f32toa = 48 - _stack__f64toa = 80 - _stack__get_by_path = 240 - _stack__html_escape = 64 - _stack__i64toa = 16 - _stack__lspace = 8 - _stack__quote = 64 - _stack__skip_array = 128 - _stack__skip_number = 72 - _stack__skip_object = 128 - _stack__skip_one = 128 - _stack__skip_one_fast = 136 - _stack__u64toa = 8 - _stack__unquote = 88 - _stack__validate_one = 128 - _stack__validate_utf8 = 48 - _stack__validate_utf8_fast = 24 - _stack__value = 328 - _stack__vnumber = 240 - _stack__vsigned = 16 - _stack__vstring = 136 - _stack__vunsigned = 16 + _stack__f32toa = 48 + _stack__f64toa = 80 + 
_stack__get_by_path = 240 + _stack__html_escape = 64 + _stack__i64toa = 16 + _stack__lspace = 8 + _stack__quote = 64 + _stack__skip_array = 128 + _stack__skip_number = 72 + _stack__skip_object = 128 + _stack__skip_one = 128 + _stack__skip_one_fast = 136 + _stack__u64toa = 8 + _stack__unquote = 88 + _stack__validate_one = 128 + _stack__validate_utf8 = 48 + _stack__validate_utf8_fast = 24 + _stack__value = 328 + _stack__vnumber = 240 + _stack__vsigned = 16 + _stack__vstring = 136 + _stack__vunsigned = 16 ) var ( - _ = _subr__f32toa - _ = _subr__f64toa - _ = _subr__get_by_path - _ = _subr__html_escape - _ = _subr__i64toa - _ = _subr__lspace - _ = _subr__quote - _ = _subr__skip_array - _ = _subr__skip_number - _ = _subr__skip_object - _ = _subr__skip_one - _ = _subr__skip_one_fast - _ = _subr__u64toa - _ = _subr__unquote - _ = _subr__validate_one - _ = _subr__validate_utf8 - _ = _subr__validate_utf8_fast - _ = _subr__value - _ = _subr__vnumber - _ = _subr__vsigned - _ = _subr__vstring - _ = _subr__vunsigned + _ = _subr__f32toa + _ = _subr__f64toa + _ = _subr__get_by_path + _ = _subr__html_escape + _ = _subr__i64toa + _ = _subr__lspace + _ = _subr__quote + _ = _subr__skip_array + _ = _subr__skip_number + _ = _subr__skip_object + _ = _subr__skip_one + _ = _subr__skip_one_fast + _ = _subr__u64toa + _ = _subr__unquote + _ = _subr__validate_one + _ = _subr__validate_utf8 + _ = _subr__validate_utf8_fast + _ = _subr__value + _ = _subr__vnumber + _ = _subr__vsigned + _ = _subr__vstring + _ = _subr__vunsigned ) const ( - _ = _stack__f32toa - _ = _stack__f64toa - _ = _stack__get_by_path - _ = _stack__html_escape - _ = _stack__i64toa - _ = _stack__lspace - _ = _stack__quote - _ = _stack__skip_array - _ = _stack__skip_number - _ = _stack__skip_object - _ = _stack__skip_one - _ = _stack__skip_one_fast - _ = _stack__u64toa - _ = _stack__unquote - _ = _stack__validate_one - _ = _stack__validate_utf8 - _ = _stack__validate_utf8_fast - _ = _stack__value - _ = _stack__vnumber - _ = _stack__vsigned - _ = _stack__vstring - _ = _stack__vunsigned + _ = _stack__f32toa + _ = _stack__f64toa + _ = _stack__get_by_path + _ = _stack__html_escape + _ = _stack__i64toa + _ = _stack__lspace + _ = _stack__quote + _ = _stack__skip_array + _ = _stack__skip_number + _ = _stack__skip_object + _ = _stack__skip_one + _ = _stack__skip_one_fast + _ = _stack__u64toa + _ = _stack__unquote + _ = _stack__validate_one + _ = _stack__validate_utf8 + _ = _stack__validate_utf8_fast + _ = _stack__value + _ = _stack__vnumber + _ = _stack__vsigned + _ = _stack__vstring + _ = _stack__vunsigned ) diff --git a/vendor/github.com/bytedance/sonic/internal/native/types/types.go b/vendor/github.com/bytedance/sonic/internal/native/types/types.go index e4e75705c..20e3bd21b 100644 --- a/vendor/github.com/bytedance/sonic/internal/native/types/types.go +++ b/vendor/github.com/bytedance/sonic/internal/native/types/types.go @@ -17,8 +17,8 @@ package types import ( - `fmt` - `sync` + "fmt" + "sync" ) type ValueType int @@ -29,110 +29,109 @@ type SearchingError uint // This definitions are followed in native/types.h. 
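Nearly all of the vendored churn in this patch — backquoted imports becoming double-quoted, tables re-aligned with tabs — is exactly what gofmt emits, which is why it lands alongside a linter version bump. A minimal sketch of checking a file against canonical gofmt output via the standard go/format package (file path is a placeholder):

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"os"
)

func main() {
	src, err := os.ReadFile("types.go") // placeholder path
	if err != nil {
		panic(err)
	}
	// format.Source returns the canonical gofmt form; any difference
	// means gofmt would rewrite the file.
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	if bytes.Equal(src, out) {
		fmt.Println("already gofmt-formatted")
	} else {
		fmt.Println("needs gofmt")
	}
}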
const ( - V_EOF ValueType = 1 - V_NULL ValueType = 2 - V_TRUE ValueType = 3 - V_FALSE ValueType = 4 - V_ARRAY ValueType = 5 - V_OBJECT ValueType = 6 - V_STRING ValueType = 7 - V_DOUBLE ValueType = 8 - V_INTEGER ValueType = 9 - _ ValueType = 10 // V_KEY_SEP - _ ValueType = 11 // V_ELEM_SEP - _ ValueType = 12 // V_ARRAY_END - _ ValueType = 13 // V_OBJECT_END - V_MAX + V_EOF ValueType = 1 + V_NULL ValueType = 2 + V_TRUE ValueType = 3 + V_FALSE ValueType = 4 + V_ARRAY ValueType = 5 + V_OBJECT ValueType = 6 + V_STRING ValueType = 7 + V_DOUBLE ValueType = 8 + V_INTEGER ValueType = 9 + _ ValueType = 10 // V_KEY_SEP + _ ValueType = 11 // V_ELEM_SEP + _ ValueType = 12 // V_ARRAY_END + _ ValueType = 13 // V_OBJECT_END + V_MAX ) const ( - B_DOUBLE_UNQUOTE = 0 - B_UNICODE_REPLACE = 1 - B_VALIDATE_STRING = 5 + B_DOUBLE_UNQUOTE = 0 + B_UNICODE_REPLACE = 1 + B_VALIDATE_STRING = 5 ) const ( - F_DOUBLE_UNQUOTE = 1 << B_DOUBLE_UNQUOTE - F_UNICODE_REPLACE = 1 << B_UNICODE_REPLACE - F_VALIDATE_STRING = 1 << B_VALIDATE_STRING + F_DOUBLE_UNQUOTE = 1 << B_DOUBLE_UNQUOTE + F_UNICODE_REPLACE = 1 << B_UNICODE_REPLACE + F_VALIDATE_STRING = 1 << B_VALIDATE_STRING ) const ( - MAX_RECURSE = 4096 + MAX_RECURSE = 4096 ) const ( - SPACE_MASK = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') + SPACE_MASK = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') ) const ( - ERR_EOF ParsingError = 1 - ERR_INVALID_CHAR ParsingError = 2 - ERR_INVALID_ESCAPE ParsingError = 3 - ERR_INVALID_UNICODE ParsingError = 4 - ERR_INTEGER_OVERFLOW ParsingError = 5 - ERR_INVALID_NUMBER_FMT ParsingError = 6 - ERR_RECURSE_EXCEED_MAX ParsingError = 7 - ERR_FLOAT_INFINITY ParsingError = 8 - ERR_MISMATCH ParsingError = 9 - ERR_INVALID_UTF8 ParsingError = 10 - - // error code used in ast - ERR_NOT_FOUND ParsingError = 33 - ERR_UNSUPPORT_TYPE ParsingError = 34 + ERR_EOF ParsingError = 1 + ERR_INVALID_CHAR ParsingError = 2 + ERR_INVALID_ESCAPE ParsingError = 3 + ERR_INVALID_UNICODE ParsingError = 4 + ERR_INTEGER_OVERFLOW ParsingError = 5 + ERR_INVALID_NUMBER_FMT ParsingError = 6 + ERR_RECURSE_EXCEED_MAX ParsingError = 7 + ERR_FLOAT_INFINITY ParsingError = 8 + ERR_MISMATCH ParsingError = 9 + ERR_INVALID_UTF8 ParsingError = 10 + + // error code used in ast + ERR_NOT_FOUND ParsingError = 33 + ERR_UNSUPPORT_TYPE ParsingError = 34 ) var _ParsingErrors = []string{ - 0 : "ok", - ERR_EOF : "eof", - ERR_INVALID_CHAR : "invalid char", - ERR_INVALID_ESCAPE : "invalid escape char", - ERR_INVALID_UNICODE : "invalid unicode escape", - ERR_INTEGER_OVERFLOW : "integer overflow", - ERR_INVALID_NUMBER_FMT : "invalid number format", - ERR_RECURSE_EXCEED_MAX : "recursion exceeded max depth", - ERR_FLOAT_INFINITY : "float number is infinity", - ERR_MISMATCH : "mismatched type with value", - ERR_INVALID_UTF8 : "invalid UTF8", + 0: "ok", + ERR_EOF: "eof", + ERR_INVALID_CHAR: "invalid char", + ERR_INVALID_ESCAPE: "invalid escape char", + ERR_INVALID_UNICODE: "invalid unicode escape", + ERR_INTEGER_OVERFLOW: "integer overflow", + ERR_INVALID_NUMBER_FMT: "invalid number format", + ERR_RECURSE_EXCEED_MAX: "recursion exceeded max depth", + ERR_FLOAT_INFINITY: "float number is infinity", + ERR_MISMATCH: "mismatched type with value", + ERR_INVALID_UTF8: "invalid UTF8", } func (self ParsingError) Error() string { - return "json: error when parsing input: " + self.Message() + return "json: error when parsing input: " + self.Message() } func (self ParsingError) Message() string { - if int(self) < len(_ParsingErrors) { - return _ParsingErrors[self] - } else { - return 
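SPACE_MASK above packs the four JSON whitespace bytes into a single 64-bit constant so that a whitespace test is one shift and one AND; the trick works only because ' ' (32), '\t' (9), '\r' (13) and '\n' (10) all sit below bit 64. A self-contained sketch of the same test:

package main

import "fmt"

// spaceMask is the same constant as SPACE_MASK above.
const spaceMask uint64 = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')

func isSpace(c byte) bool {
	return c < 64 && spaceMask&(1<<uint64(c)) != 0
}

func main() {
	for _, c := range []byte{' ', '\t', 'x'} {
		fmt.Printf("%q -> %v\n", c, isSpace(c))
	}
}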
fmt.Sprintf("unknown error %d", self) - } + if int(self) < len(_ParsingErrors) { + return _ParsingErrors[self] + } else { + return fmt.Sprintf("unknown error %d", self) + } } type JsonState struct { - Vt ValueType - Dv float64 - Iv int64 - Ep int - Dbuf *byte - Dcap int + Vt ValueType + Dv float64 + Iv int64 + Ep int + Dbuf *byte + Dcap int } type StateMachine struct { - Sp int - Vt [MAX_RECURSE]int + Sp int + Vt [MAX_RECURSE]int } var stackPool = sync.Pool{ - New: func()interface{}{ - return &StateMachine{} - }, + New: func() interface{} { + return &StateMachine{} + }, } func NewStateMachine() *StateMachine { - return stackPool.Get().(*StateMachine) + return stackPool.Get().(*StateMachine) } func FreeStateMachine(fsm *StateMachine) { - stackPool.Put(fsm) + stackPool.Put(fsm) } - diff --git a/vendor/github.com/bytedance/sonic/internal/resolver/resolver.go b/vendor/github.com/bytedance/sonic/internal/resolver/resolver.go index 795434f4e..fb3539380 100644 --- a/vendor/github.com/bytedance/sonic/internal/resolver/resolver.go +++ b/vendor/github.com/bytedance/sonic/internal/resolver/resolver.go @@ -17,198 +17,198 @@ package resolver import ( - `fmt` - `reflect` - `strings` - `sync` + "fmt" + "reflect" + "strings" + "sync" ) type FieldOpts int type OffsetType int const ( - F_omitempty FieldOpts = 1 << iota - F_stringize + F_omitempty FieldOpts = 1 << iota + F_stringize ) const ( - F_offset OffsetType = iota - F_deref + F_offset OffsetType = iota + F_deref ) type Offset struct { - Size uintptr - Kind OffsetType - Type reflect.Type + Size uintptr + Kind OffsetType + Type reflect.Type } type FieldMeta struct { - Name string - Path []Offset - Opts FieldOpts - Type reflect.Type + Name string + Path []Offset + Opts FieldOpts + Type reflect.Type } func (self *FieldMeta) String() string { - var path []string - var opts []string - - /* dump the field path */ - for _, off := range self.Path { - if off.Kind == F_offset { - path = append(path, fmt.Sprintf("%d", off.Size)) - } else { - path = append(path, fmt.Sprintf("%d.(*%s)", off.Size, off.Type)) - } - } - - /* check for "string" */ - if (self.Opts & F_stringize) != 0 { - opts = append(opts, "string") - } - - /* check for "omitempty" */ - if (self.Opts & F_omitempty) != 0 { - opts = append(opts, "omitempty") - } - - /* format the field */ - return fmt.Sprintf( - "{Field \"%s\" @ %s, opts=%s, type=%s}", - self.Name, - strings.Join(path, "."), - strings.Join(opts, ","), - self.Type, - ) + var path []string + var opts []string + + /* dump the field path */ + for _, off := range self.Path { + if off.Kind == F_offset { + path = append(path, fmt.Sprintf("%d", off.Size)) + } else { + path = append(path, fmt.Sprintf("%d.(*%s)", off.Size, off.Type)) + } + } + + /* check for "string" */ + if (self.Opts & F_stringize) != 0 { + opts = append(opts, "string") + } + + /* check for "omitempty" */ + if (self.Opts & F_omitempty) != 0 { + opts = append(opts, "omitempty") + } + + /* format the field */ + return fmt.Sprintf( + "{Field \"%s\" @ %s, opts=%s, type=%s}", + self.Name, + strings.Join(path, "."), + strings.Join(opts, ","), + self.Type, + ) } func (self *FieldMeta) optimize() { - var n int - var v uintptr - - /* merge adjacent offsets */ - for _, o := range self.Path { - if v += o.Size; o.Kind == F_deref { - self.Path[n].Size = v - self.Path[n].Type, v = o.Type, 0 - self.Path[n].Kind, n = F_deref, n + 1 - } - } - - /* last offset value */ - if v != 0 { - self.Path[n].Size = v - self.Path[n].Type = nil - self.Path[n].Kind = F_offset - n++ - } - - /* must be at least 
1 offset */ - if n != 0 { - self.Path = self.Path[:n] - } else { - self.Path = []Offset{{Kind: F_offset}} - } + var n int + var v uintptr + + /* merge adjacent offsets */ + for _, o := range self.Path { + if v += o.Size; o.Kind == F_deref { + self.Path[n].Size = v + self.Path[n].Type, v = o.Type, 0 + self.Path[n].Kind, n = F_deref, n+1 + } + } + + /* last offset value */ + if v != 0 { + self.Path[n].Size = v + self.Path[n].Type = nil + self.Path[n].Kind = F_offset + n++ + } + + /* must be at least 1 offset */ + if n != 0 { + self.Path = self.Path[:n] + } else { + self.Path = []Offset{{Kind: F_offset}} + } } func resolveFields(vt reflect.Type) []FieldMeta { - tfv := typeFields(vt) - ret := []FieldMeta(nil) - - /* convert each field */ - for _, fv := range tfv.list { - item := vt - path := []Offset(nil) - opts := FieldOpts(0) - - /* check for "string" */ - if fv.quoted { - opts |= F_stringize - } - - /* check for "omitempty" */ - if fv.omitEmpty { - opts |= F_omitempty - } - - /* dump the field path */ - for _, i := range fv.index { - kind := F_offset - fval := item.Field(i) - item = fval.Type - - /* deref the pointer if needed */ - if item.Kind() == reflect.Ptr { - kind = F_deref - item = item.Elem() - } - - /* add to path */ - path = append(path, Offset { - Kind: kind, - Type: item, - Size: fval.Offset, - }) - } - - /* get the index to the last offset */ - idx := len(path) - 1 - fvt := path[idx].Type - - /* do not dereference into fields */ - if path[idx].Kind == F_deref { - fvt = reflect.PtrTo(fvt) - path[idx].Kind = F_offset - } - - /* add to result */ - ret = append(ret, FieldMeta { - Type: fvt, - Opts: opts, - Path: path, - Name: fv.name, - }) - } - - /* optimize the offsets */ - for i := range ret { - ret[i].optimize() - } - - /* all done */ - return ret + tfv := typeFields(vt) + ret := []FieldMeta(nil) + + /* convert each field */ + for _, fv := range tfv.list { + item := vt + path := []Offset(nil) + opts := FieldOpts(0) + + /* check for "string" */ + if fv.quoted { + opts |= F_stringize + } + + /* check for "omitempty" */ + if fv.omitEmpty { + opts |= F_omitempty + } + + /* dump the field path */ + for _, i := range fv.index { + kind := F_offset + fval := item.Field(i) + item = fval.Type + + /* deref the pointer if needed */ + if item.Kind() == reflect.Ptr { + kind = F_deref + item = item.Elem() + } + + /* add to path */ + path = append(path, Offset{ + Kind: kind, + Type: item, + Size: fval.Offset, + }) + } + + /* get the index to the last offset */ + idx := len(path) - 1 + fvt := path[idx].Type + + /* do not dereference into fields */ + if path[idx].Kind == F_deref { + fvt = reflect.PtrTo(fvt) + path[idx].Kind = F_offset + } + + /* add to result */ + ret = append(ret, FieldMeta{ + Type: fvt, + Opts: opts, + Path: path, + Name: fv.name, + }) + } + + /* optimize the offsets */ + for i := range ret { + ret[i].optimize() + } + + /* all done */ + return ret } var ( - fieldLock = sync.RWMutex{} - fieldCache = map[reflect.Type][]FieldMeta{} + fieldLock = sync.RWMutex{} + fieldCache = map[reflect.Type][]FieldMeta{} ) func ResolveStruct(vt reflect.Type) []FieldMeta { - var ok bool - var fm []FieldMeta - - /* attempt to read from cache */ - fieldLock.RLock() - fm, ok = fieldCache[vt] - fieldLock.RUnlock() - - /* check if it was cached */ - if ok { - return fm - } - - /* otherwise use write-lock */ - fieldLock.Lock() - defer fieldLock.Unlock() - - /* double check */ - if fm, ok = fieldCache[vt]; ok { - return fm - } - - /* resolve the field */ - fm = resolveFields(vt) - fieldCache[vt] = fm - 
return fm + var ok bool + var fm []FieldMeta + + /* attempt to read from cache */ + fieldLock.RLock() + fm, ok = fieldCache[vt] + fieldLock.RUnlock() + + /* check if it was cached */ + if ok { + return fm + } + + /* otherwise use write-lock */ + fieldLock.Lock() + defer fieldLock.Unlock() + + /* double check */ + if fm, ok = fieldCache[vt]; ok { + return fm + } + + /* resolve the field */ + fm = resolveFields(vt) + fieldCache[vt] = fm + return fm } diff --git a/vendor/github.com/bytedance/sonic/internal/resolver/stubs.go b/vendor/github.com/bytedance/sonic/internal/resolver/stubs.go index ac27aa3d6..8f1f04c94 100644 --- a/vendor/github.com/bytedance/sonic/internal/resolver/stubs.go +++ b/vendor/github.com/bytedance/sonic/internal/resolver/stubs.go @@ -17,28 +17,28 @@ package resolver import ( - _ `encoding/json` - `reflect` - _ `unsafe` + _ "encoding/json" + "reflect" + _ "unsafe" ) type StdField struct { - name string - nameBytes []byte - equalFold func() - nameNonEsc string - nameEscHTML string - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool - encoder func() + name string + nameBytes []byte + equalFold func() + nameNonEsc string + nameEscHTML string + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool + encoder func() } type StdStructFields struct { - list []StdField - nameIndex map[string]int + list []StdField + nameIndex map[string]int } //go:noescape diff --git a/vendor/github.com/bytedance/sonic/internal/rt/fastmem.go b/vendor/github.com/bytedance/sonic/internal/rt/fastmem.go index 5bf80dd4e..e80b555c6 100644 --- a/vendor/github.com/bytedance/sonic/internal/rt/fastmem.go +++ b/vendor/github.com/bytedance/sonic/internal/rt/fastmem.go @@ -17,53 +17,53 @@ package rt import ( - `unsafe` - `reflect` + "reflect" + "unsafe" ) //go:nosplit func Get16(v []byte) int16 { - return *(*int16)((*GoSlice)(unsafe.Pointer(&v)).Ptr) + return *(*int16)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Get32(v []byte) int32 { - return *(*int32)((*GoSlice)(unsafe.Pointer(&v)).Ptr) + return *(*int32)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Get64(v []byte) int64 { - return *(*int64)((*GoSlice)(unsafe.Pointer(&v)).Ptr) + return *(*int64)((*GoSlice)(unsafe.Pointer(&v)).Ptr) } //go:nosplit func Mem2Str(v []byte) (s string) { - (*GoString)(unsafe.Pointer(&s)).Len = (*GoSlice)(unsafe.Pointer(&v)).Len - (*GoString)(unsafe.Pointer(&s)).Ptr = (*GoSlice)(unsafe.Pointer(&v)).Ptr - return + (*GoString)(unsafe.Pointer(&s)).Len = (*GoSlice)(unsafe.Pointer(&v)).Len + (*GoString)(unsafe.Pointer(&s)).Ptr = (*GoSlice)(unsafe.Pointer(&v)).Ptr + return } //go:nosplit func Str2Mem(s string) (v []byte) { - (*GoSlice)(unsafe.Pointer(&v)).Cap = (*GoString)(unsafe.Pointer(&s)).Len - (*GoSlice)(unsafe.Pointer(&v)).Len = (*GoString)(unsafe.Pointer(&s)).Len - (*GoSlice)(unsafe.Pointer(&v)).Ptr = (*GoString)(unsafe.Pointer(&s)).Ptr - return + (*GoSlice)(unsafe.Pointer(&v)).Cap = (*GoString)(unsafe.Pointer(&s)).Len + (*GoSlice)(unsafe.Pointer(&v)).Len = (*GoString)(unsafe.Pointer(&s)).Len + (*GoSlice)(unsafe.Pointer(&v)).Ptr = (*GoString)(unsafe.Pointer(&s)).Ptr + return } func BytesFrom(p unsafe.Pointer, n int, c int) (r []byte) { - (*GoSlice)(unsafe.Pointer(&r)).Ptr = p - (*GoSlice)(unsafe.Pointer(&r)).Len = n - (*GoSlice)(unsafe.Pointer(&r)).Cap = c - return + (*GoSlice)(unsafe.Pointer(&r)).Ptr = p + (*GoSlice)(unsafe.Pointer(&r)).Len = n + (*GoSlice)(unsafe.Pointer(&r)).Cap = c + return } func FuncAddr(f interface{}) unsafe.Pointer { - if vv := UnpackEface(f); 
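ResolveStruct above is a classic double-checked cache: the fast path reads under RLock, and on a miss the slow path re-checks under the write lock before computing, because another goroutine may have filled the entry in between. The same shape on a toy cache:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = map[reflect.Type]int{}
)

// fieldCount shows the read-lock / double-check pattern used by
// ResolveStruct, with a trivial payload instead of []FieldMeta.
func fieldCount(t reflect.Type) int {
	mu.RLock()
	n, ok := cache[t]
	mu.RUnlock()
	if ok {
		return n
	}
	mu.Lock()
	defer mu.Unlock()
	if n, ok := cache[t]; ok { // another goroutine may have raced ahead
		return n
	}
	n = t.NumField()
	cache[t] = n
	return n
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(fieldCount(reflect.TypeOf(point{})))
}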
vv.Type.Kind() != reflect.Func { - panic("f is not a function") - } else { - return *(*unsafe.Pointer)(vv.Value) - } + if vv := UnpackEface(f); vv.Type.Kind() != reflect.Func { + panic("f is not a function") + } else { + return *(*unsafe.Pointer)(vv.Value) + } } func IndexChar(src string, index int) unsafe.Pointer { @@ -91,22 +91,22 @@ func GuardSlice(buf *[]byte, n int) { //go:nosplit func Ptr2SlicePtr(s unsafe.Pointer, l int, c int) unsafe.Pointer { - slice := &GoSlice{ - Ptr: s, - Len: l, - Cap: c, - } - return unsafe.Pointer(slice) + slice := &GoSlice{ + Ptr: s, + Len: l, + Cap: c, + } + return unsafe.Pointer(slice) } //go:nosplit func StrPtr(s string) unsafe.Pointer { - return (*GoString)(unsafe.Pointer(&s)).Ptr + return (*GoString)(unsafe.Pointer(&s)).Ptr } //go:nosplit func StrFrom(p unsafe.Pointer, n int64) (s string) { - (*GoString)(unsafe.Pointer(&s)).Ptr = p - (*GoString)(unsafe.Pointer(&s)).Len = int(n) - return -} \ No newline at end of file + (*GoString)(unsafe.Pointer(&s)).Ptr = p + (*GoString)(unsafe.Pointer(&s)).Len = int(n) + return +} diff --git a/vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go b/vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go index 2b2757f5b..1ef4fd2c5 100644 --- a/vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go +++ b/vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go @@ -17,197 +17,197 @@ package rt import ( - `reflect` - `unsafe` + "reflect" + "unsafe" ) var ( - reflectRtypeItab = findReflectRtypeItab() + reflectRtypeItab = findReflectRtypeItab() ) // GoType.KindFlags const const ( - F_direct = 1 << 5 - F_kind_mask = (1 << 5) - 1 + F_direct = 1 << 5 + F_kind_mask = (1 << 5) - 1 ) // GoType.Flags const const ( - tflagUncommon uint8 = 1 << 0 - tflagExtraStar uint8 = 1 << 1 - tflagNamed uint8 = 1 << 2 - tflagRegularMemory uint8 = 1 << 3 + tflagUncommon uint8 = 1 << 0 + tflagExtraStar uint8 = 1 << 1 + tflagNamed uint8 = 1 << 2 + tflagRegularMemory uint8 = 1 << 3 ) type GoType struct { - Size uintptr - PtrData uintptr - Hash uint32 - Flags uint8 - Align uint8 - FieldAlign uint8 - KindFlags uint8 - Traits unsafe.Pointer - GCData *byte - Str int32 - PtrToSelf int32 + Size uintptr + PtrData uintptr + Hash uint32 + Flags uint8 + Align uint8 + FieldAlign uint8 + KindFlags uint8 + Traits unsafe.Pointer + GCData *byte + Str int32 + PtrToSelf int32 } func (self *GoType) IsNamed() bool { - return (self.Flags & tflagNamed) != 0 + return (self.Flags & tflagNamed) != 0 } func (self *GoType) Kind() reflect.Kind { - return reflect.Kind(self.KindFlags & F_kind_mask) + return reflect.Kind(self.KindFlags & F_kind_mask) } func (self *GoType) Pack() (t reflect.Type) { - (*GoIface)(unsafe.Pointer(&t)).Itab = reflectRtypeItab - (*GoIface)(unsafe.Pointer(&t)).Value = unsafe.Pointer(self) - return + (*GoIface)(unsafe.Pointer(&t)).Itab = reflectRtypeItab + (*GoIface)(unsafe.Pointer(&t)).Value = unsafe.Pointer(self) + return } func (self *GoType) String() string { - return self.Pack().String() + return self.Pack().String() } func (self *GoType) Indirect() bool { - return self.KindFlags & F_direct == 0 + return self.KindFlags&F_direct == 0 } type GoMap struct { - Count int - Flags uint8 - B uint8 - Overflow uint16 - Hash0 uint32 - Buckets unsafe.Pointer - OldBuckets unsafe.Pointer - Evacuate uintptr - Extra unsafe.Pointer + Count int + Flags uint8 + B uint8 + Overflow uint16 + Hash0 uint32 + Buckets unsafe.Pointer + OldBuckets unsafe.Pointer + Evacuate uintptr + Extra unsafe.Pointer } type GoMapIterator struct { - K unsafe.Pointer - V 
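Mem2Str and Str2Mem above alias a []byte and a string over the same backing memory by rewriting the GoSlice/GoString headers. On Go 1.20+ the same zero-copy conversions can be spelled with the unsafe package's own helpers, as in this sketch; either way the caller must never mutate the bytes while the string is alive, and must never write through a slice derived from a string:

package main

import (
	"fmt"
	"unsafe"
)

// mem2str/str2mem are modern spellings of rt.Mem2Str and rt.Str2Mem.
func mem2str(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(&b[0], len(b))
}

func str2mem(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	fmt.Println(mem2str([]byte("hi")), len(str2mem("hi")))
}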
unsafe.Pointer - T *GoMapType - H *GoMap - Buckets unsafe.Pointer - Bptr *unsafe.Pointer - Overflow *[]unsafe.Pointer - OldOverflow *[]unsafe.Pointer - StartBucket uintptr - Offset uint8 - Wrapped bool - B uint8 - I uint8 - Bucket uintptr - CheckBucket uintptr + K unsafe.Pointer + V unsafe.Pointer + T *GoMapType + H *GoMap + Buckets unsafe.Pointer + Bptr *unsafe.Pointer + Overflow *[]unsafe.Pointer + OldOverflow *[]unsafe.Pointer + StartBucket uintptr + Offset uint8 + Wrapped bool + B uint8 + I uint8 + Bucket uintptr + CheckBucket uintptr } type GoItab struct { - it unsafe.Pointer - Vt *GoType - hv uint32 - _ [4]byte - fn [1]uintptr + it unsafe.Pointer + Vt *GoType + hv uint32 + _ [4]byte + fn [1]uintptr } type GoIface struct { - Itab *GoItab - Value unsafe.Pointer + Itab *GoItab + Value unsafe.Pointer } type GoEface struct { - Type *GoType - Value unsafe.Pointer + Type *GoType + Value unsafe.Pointer } func (self GoEface) Pack() (v interface{}) { - *(*GoEface)(unsafe.Pointer(&v)) = self - return + *(*GoEface)(unsafe.Pointer(&v)) = self + return } type GoPtrType struct { - GoType - Elem *GoType + GoType + Elem *GoType } type GoMapType struct { - GoType - Key *GoType - Elem *GoType - Bucket *GoType - Hasher func(unsafe.Pointer, uintptr) uintptr - KeySize uint8 - ElemSize uint8 - BucketSize uint16 - Flags uint32 + GoType + Key *GoType + Elem *GoType + Bucket *GoType + Hasher func(unsafe.Pointer, uintptr) uintptr + KeySize uint8 + ElemSize uint8 + BucketSize uint16 + Flags uint32 } func (self *GoMapType) IndirectElem() bool { - return self.Flags & 2 != 0 + return self.Flags&2 != 0 } type GoStructType struct { - GoType - Pkg *byte - Fields []GoStructField + GoType + Pkg *byte + Fields []GoStructField } type GoStructField struct { - Name *byte - Type *GoType - OffEmbed uintptr + Name *byte + Type *GoType + OffEmbed uintptr } type GoInterfaceType struct { - GoType - PkgPath *byte - Methods []GoInterfaceMethod + GoType + PkgPath *byte + Methods []GoInterfaceMethod } type GoInterfaceMethod struct { - Name int32 - Type int32 + Name int32 + Type int32 } type GoSlice struct { - Ptr unsafe.Pointer - Len int - Cap int + Ptr unsafe.Pointer + Len int + Cap int } type GoString struct { - Ptr unsafe.Pointer - Len int + Ptr unsafe.Pointer + Len int } func PtrElem(t *GoType) *GoType { - return (*GoPtrType)(unsafe.Pointer(t)).Elem + return (*GoPtrType)(unsafe.Pointer(t)).Elem } func MapType(t *GoType) *GoMapType { - return (*GoMapType)(unsafe.Pointer(t)) + return (*GoMapType)(unsafe.Pointer(t)) } func IfaceType(t *GoType) *GoInterfaceType { - return (*GoInterfaceType)(unsafe.Pointer(t)) + return (*GoInterfaceType)(unsafe.Pointer(t)) } func UnpackType(t reflect.Type) *GoType { - return (*GoType)((*GoIface)(unsafe.Pointer(&t)).Value) + return (*GoType)((*GoIface)(unsafe.Pointer(&t)).Value) } func UnpackEface(v interface{}) GoEface { - return *(*GoEface)(unsafe.Pointer(&v)) + return *(*GoEface)(unsafe.Pointer(&v)) } func UnpackIface(v interface{}) GoIface { - return *(*GoIface)(unsafe.Pointer(&v)) + return *(*GoIface)(unsafe.Pointer(&v)) } func findReflectRtypeItab() *GoItab { - v := reflect.TypeOf(struct{}{}) - return (*GoIface)(unsafe.Pointer(&v)).Itab + v := reflect.TypeOf(struct{}{}) + return (*GoIface)(unsafe.Pointer(&v)).Itab } diff --git a/vendor/github.com/bytedance/sonic/internal/rt/gcwb.go b/vendor/github.com/bytedance/sonic/internal/rt/gcwb.go index c3217c899..9d1123883 100644 --- a/vendor/github.com/bytedance/sonic/internal/rt/gcwb.go +++ b/vendor/github.com/bytedance/sonic/internal/rt/gcwb.go @@ 
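UnpackEface above relies on the runtime representing an interface{} as a two-word (type pointer, data pointer) pair, which GoEface mirrors. That layout is an internal runtime detail rather than a stable API — which is why sonic vendors its own struct definitions. A standalone sketch of the same cast:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors rt.GoEface.
type eface struct {
	typ unsafe.Pointer
	val unsafe.Pointer
}

func main() {
	v := interface{}(42)
	e := *(*eface)(unsafe.Pointer(&v)) // cf. UnpackEface
	fmt.Printf("type=%p data=%p\n", e.typ, e.val)
}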
-17,63 +17,63 @@ package rt import ( - `os` - `sync/atomic` - `unsafe` + "os" + "sync/atomic" + "unsafe" - `golang.org/x/arch/x86/x86asm` + "golang.org/x/arch/x86/x86asm" ) const ( - _MaxInstr = 15 + _MaxInstr = 15 ) func isvar(arg x86asm.Arg) bool { - v, ok := arg.(x86asm.Mem) - return ok && v.Base == x86asm.RIP + v, ok := arg.(x86asm.Mem) + return ok && v.Base == x86asm.RIP } func iszero(arg x86asm.Arg) bool { - v, ok := arg.(x86asm.Imm) - return ok && v == 0 + v, ok := arg.(x86asm.Imm) + return ok && v == 0 } func GcwbAddr() uintptr { - var err error - var off uintptr - var ins x86asm.Inst - - /* get the function address */ - pc := uintptr(0) - fp := FuncAddr(atomic.StorePointer) - - /* search within the first 16 instructions */ - for i := 0; i < 16; i++ { - mem := unsafe.Pointer(uintptr(fp) + pc) - buf := BytesFrom(mem, _MaxInstr, _MaxInstr) - - /* disassemble the instruction */ - if ins, err = x86asm.Decode(buf, 64); err != nil { - panic("gcwbaddr: " + err.Error()) - } - - /* check for a byte comparison with zero */ - if ins.Op == x86asm.CMP && ins.MemBytes == 1 && isvar(ins.Args[0]) && iszero(ins.Args[1]) { - off = pc + uintptr(ins.Len) + uintptr(ins.Args[0].(x86asm.Mem).Disp) - break - } - - /* move to next instruction */ - nb := ins.Len - pc += uintptr(nb) - } - - /* check for address */ - if off == 0 { - panic("gcwbaddr: could not locate the variable `writeBarrier`") - } else { - return uintptr(fp) + off - } + var err error + var off uintptr + var ins x86asm.Inst + + /* get the function address */ + pc := uintptr(0) + fp := FuncAddr(atomic.StorePointer) + + /* search within the first 16 instructions */ + for i := 0; i < 16; i++ { + mem := unsafe.Pointer(uintptr(fp) + pc) + buf := BytesFrom(mem, _MaxInstr, _MaxInstr) + + /* disassemble the instruction */ + if ins, err = x86asm.Decode(buf, 64); err != nil { + panic("gcwbaddr: " + err.Error()) + } + + /* check for a byte comparison with zero */ + if ins.Op == x86asm.CMP && ins.MemBytes == 1 && isvar(ins.Args[0]) && iszero(ins.Args[1]) { + off = pc + uintptr(ins.Len) + uintptr(ins.Args[0].(x86asm.Mem).Disp) + break + } + + /* move to next instruction */ + nb := ins.Len + pc += uintptr(nb) + } + + /* check for address */ + if off == 0 { + panic("gcwbaddr: could not locate the variable `writeBarrier`") + } else { + return uintptr(fp) + off + } } // StopProfiling is used to stop traceback introduced by SIGPROF while native code is running. 
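GcwbAddr above walks the machine code of atomic.StorePointer, decoding up to 16 instructions with golang.org/x/arch/x86/x86asm until it finds the byte-sized compare against the runtime's writeBarrier flag. Decoding a single instruction with that package looks like this (the bytes are a hand-assembled MOV, not taken from the runtime):

package main

import (
	"fmt"

	"golang.org/x/arch/x86/x86asm"
)

func main() {
	// 48 89 d8 encodes MOV RAX, RBX; GcwbAddr decodes live bytes from
	// atomic.StorePointer the same way, in 64-bit mode.
	ins, err := x86asm.Decode([]byte{0x48, 0x89, 0xd8}, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(ins.Op, ins.Args[0], ins.Args[1], "len =", ins.Len)
}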
@@ -88,14 +88,14 @@ var StopProfiling = os.Getenv("SONIC_STOP_PROFILING") != "" // } var ( - // // go:linkname runtimeProf runtime.prof - // runtimeProf Prof + // // go:linkname runtimeProf runtime.prof + // runtimeProf Prof - // count of native-C calls - yieldCount uint32 + // count of native-C calls + yieldCount uint32 - // previous value of runtimeProf.hz - oldHz int32 + // previous value of runtimeProf.hz + oldHz int32 ) //go:nosplit diff --git a/vendor/github.com/bytedance/sonic/internal/rt/int48.go b/vendor/github.com/bytedance/sonic/internal/rt/int48.go index e9f82d731..57a143978 100644 --- a/vendor/github.com/bytedance/sonic/internal/rt/int48.go +++ b/vendor/github.com/bytedance/sonic/internal/rt/int48.go @@ -17,20 +17,20 @@ package rt const ( - MinInt48 = -(1 << 47) - MaxInt48 = +(1 << 47) - 1 + MinInt48 = -(1 << 47) + MaxInt48 = +(1 << 47) - 1 ) func PackInt(v int) uint64 { - if u := uint64(v); v < MinInt48 || v > MaxInt48 { - panic("int48 out of range") - } else { - return ((u >> 63) << 47) | (u & 0x00007fffffffffff) - } + if u := uint64(v); v < MinInt48 || v > MaxInt48 { + panic("int48 out of range") + } else { + return ((u >> 63) << 47) | (u & 0x00007fffffffffff) + } } func UnpackInt(v uint64) int { - v &= 0x0000ffffffffffff - v |= (v >> 47) * (0xffff << 48) - return int(v) + v &= 0x0000ffffffffffff + v |= (v >> 47) * (0xffff << 48) + return int(v) } diff --git a/vendor/github.com/bytedance/sonic/internal/rt/stackmap.go b/vendor/github.com/bytedance/sonic/internal/rt/stackmap.go index 84ed9a95f..f60a2565c 100644 --- a/vendor/github.com/bytedance/sonic/internal/rt/stackmap.go +++ b/vendor/github.com/bytedance/sonic/internal/rt/stackmap.go @@ -17,49 +17,48 @@ package rt import ( - `fmt` - `strings` - `unsafe` - + "fmt" + "strings" + "unsafe" ) type Bitmap struct { - N int - B []byte + N int + B []byte } func (self *Bitmap) grow() { - if self.N >= len(self.B) * 8 { - self.B = append(self.B, 0) - } + if self.N >= len(self.B)*8 { + self.B = append(self.B, 0) + } } func (self *Bitmap) mark(i int, bv int) { - if bv != 0 { - self.B[i / 8] |= 1 << (i % 8) - } else { - self.B[i / 8] &^= 1 << (i % 8) - } + if bv != 0 { + self.B[i/8] |= 1 << (i % 8) + } else { + self.B[i/8] &^= 1 << (i % 8) + } } func (self *Bitmap) Set(i int, bv int) { - if i >= self.N { - panic("bitmap: invalid bit position") - } else { - self.mark(i, bv) - } + if i >= self.N { + panic("bitmap: invalid bit position") + } else { + self.mark(i, bv) + } } func (self *Bitmap) Append(bv int) { - self.grow() - self.mark(self.N, bv) - self.N++ + self.grow() + self.mark(self.N, bv) + self.N++ } func (self *Bitmap) AppendMany(n int, bv int) { - for i := 0; i < n; i++ { - self.Append(bv) - } + for i := 0; i < n; i++ { + self.Append(bv) + } } // var ( @@ -68,34 +67,34 @@ func (self *Bitmap) AppendMany(n int, bv int) { // ) type BitVec struct { - N uintptr - B unsafe.Pointer + N uintptr + B unsafe.Pointer } func (self BitVec) Bit(i uintptr) byte { - return (*(*byte)(unsafe.Pointer(uintptr(self.B) + i / 8)) >> (i % 8)) & 1 + return (*(*byte)(unsafe.Pointer(uintptr(self.B) + i/8)) >> (i % 8)) & 1 } func (self BitVec) String() string { - var i uintptr - var v []string + var i uintptr + var v []string - /* add each bit */ - for i = 0; i < self.N; i++ { - v = append(v, fmt.Sprintf("%d", self.Bit(i))) - } + /* add each bit */ + for i = 0; i < self.N; i++ { + v = append(v, fmt.Sprintf("%d", self.Bit(i))) + } - /* join them together */ - return fmt.Sprintf( - "BitVec { %s }", - strings.Join(v, ", "), - ) + /* join them together */ + return 
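PackInt/UnpackInt above squeeze a signed int into 48 bits by relocating the sign bit to bit 47 on pack and sign-extending on unpack, panicking outside [-2^47, 2^47-1]. A round-trip check of the same arithmetic:

package main

import "fmt"

const (
	minInt48 = -(1 << 47)
	maxInt48 = 1<<47 - 1
)

// packInt moves the sign bit into bit 47; unpackInt masks to 48 bits
// and sign-extends, matching PackInt/UnpackInt above.
func packInt(v int) uint64 {
	if v < minInt48 || v > maxInt48 {
		panic("int48 out of range")
	}
	u := uint64(v)
	return ((u >> 63) << 47) | (u & 0x00007fffffffffff)
}

func unpackInt(v uint64) int {
	v &= 0x0000ffffffffffff
	v |= (v >> 47) * (0xffff << 48)
	return int(v)
}

func main() {
	for _, v := range []int{0, 1, -1, maxInt48, minInt48} {
		fmt.Println(v, unpackInt(packInt(v)) == v) // all true
	}
}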
fmt.Sprintf( + "BitVec { %s }", + strings.Join(v, ", "), + ) } type StackMap struct { - N int32 - L int32 - B [1]byte + N int32 + L int32 + B [1]byte } // func (self *StackMap) add() { @@ -105,43 +104,43 @@ type StackMap struct { // } func (self *StackMap) Pin() uintptr { - // self.add() - return uintptr(unsafe.Pointer(self)) + // self.add() + return uintptr(unsafe.Pointer(self)) } func (self *StackMap) Get(i int32) BitVec { - return BitVec { - N: uintptr(self.L), - B: unsafe.Pointer(uintptr(unsafe.Pointer(&self.B)) + uintptr(i * ((self.L + 7) >> 3))), - } + return BitVec{ + N: uintptr(self.L), + B: unsafe.Pointer(uintptr(unsafe.Pointer(&self.B)) + uintptr(i*((self.L+7)>>3))), + } } func (self *StackMap) String() string { - sb := strings.Builder{} - sb.WriteString("StackMap {") + sb := strings.Builder{} + sb.WriteString("StackMap {") - /* dump every stack map */ - for i := int32(0); i < self.N; i++ { - sb.WriteRune('\n') - sb.WriteString(" " + self.Get(i).String()) - } + /* dump every stack map */ + for i := int32(0); i < self.N; i++ { + sb.WriteRune('\n') + sb.WriteString(" " + self.Get(i).String()) + } - /* close the stackmap */ - sb.WriteString("\n}") - return sb.String() + /* close the stackmap */ + sb.WriteString("\n}") + return sb.String() } func (self *StackMap) MarshalBinary() ([]byte, error) { - size := int(self.N) * int(self.L) + int(unsafe.Sizeof(self.L)) + int(unsafe.Sizeof(self.N)) - return BytesFrom(unsafe.Pointer(self), size, size), nil + size := int(self.N)*int(self.L) + int(unsafe.Sizeof(self.L)) + int(unsafe.Sizeof(self.N)) + return BytesFrom(unsafe.Pointer(self), size, size), nil } var ( - byteType = UnpackEface(byte(0)).Type + byteType = UnpackEface(byte(0)).Type ) const ( - _StackMapSize = unsafe.Sizeof(StackMap{}) + _StackMapSize = unsafe.Sizeof(StackMap{}) ) //go:linkname mallocgc runtime.mallocgc @@ -149,33 +148,33 @@ const ( func mallocgc(nb uintptr, vt *GoType, zero bool) unsafe.Pointer type StackMapBuilder struct { - b Bitmap + b Bitmap } //go:nocheckptr func (self *StackMapBuilder) Build() (p *StackMap) { - nb := len(self.b.B) - bm := mallocgc(_StackMapSize + uintptr(nb) - 1, byteType, false) + nb := len(self.b.B) + bm := mallocgc(_StackMapSize+uintptr(nb)-1, byteType, false) - /* initialize as 1 bitmap of N bits */ - p = (*StackMap)(bm) - p.N, p.L = 1, int32(self.b.N) - copy(BytesFrom(unsafe.Pointer(&p.B), nb, nb), self.b.B) - return + /* initialize as 1 bitmap of N bits */ + p = (*StackMap)(bm) + p.N, p.L = 1, int32(self.b.N) + copy(BytesFrom(unsafe.Pointer(&p.B), nb, nb), self.b.B) + return } func (self *StackMapBuilder) AddField(ptr bool) { - if ptr { - self.b.Append(1) - } else { - self.b.Append(0) - } + if ptr { + self.b.Append(1) + } else { + self.b.Append(0) + } } func (self *StackMapBuilder) AddFields(n int, ptr bool) { - if ptr { - self.b.AppendMany(n, 1) - } else { - self.b.AppendMany(n, 0) - } -} \ No newline at end of file + if ptr { + self.b.AppendMany(n, 1) + } else { + self.b.AppendMany(n, 0) + } +} diff --git a/vendor/github.com/bytedance/sonic/loader/funcdata.go b/vendor/github.com/bytedance/sonic/loader/funcdata.go index 9b760f615..fdfe83d9e 100644 --- a/vendor/github.com/bytedance/sonic/loader/funcdata.go +++ b/vendor/github.com/bytedance/sonic/loader/funcdata.go @@ -17,128 +17,128 @@ package loader import ( - `encoding` - `encoding/binary` - `fmt` - `reflect` - `strings` - `sync` - `unsafe` + "encoding" + "encoding/binary" + "fmt" + "reflect" + "strings" + "sync" + "unsafe" ) const ( - _MinLC uint8 = 1 - _PtrSize uint8 = 8 + _MinLC uint8 = 1 
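The Bitmap above records one bit per stack slot (pointer or not), growing its byte slice on demand; StackMapBuilder.Build then copies those bytes into a runtime-shaped StackMap. The bit bookkeeping in isolation:

package main

import "fmt"

// bitmap is a minimal rt.Bitmap: bit i of b[i/8] records whether
// stack slot i holds a pointer.
type bitmap struct {
	n int
	b []byte
}

func (m *bitmap) add(bv int) {
	if m.n >= len(m.b)*8 { // cf. grow()
		m.b = append(m.b, 0)
	}
	if bv != 0 { // cf. mark()
		m.b[m.n/8] |= 1 << (m.n % 8)
	}
	m.n++
}

func main() {
	var m bitmap
	for _, bv := range []int{1, 0, 1, 1} { // cf. AddField(true/false)
		m.add(bv)
	}
	fmt.Printf("%d bits: %08b\n", m.n, m.b[0]) // 4 bits: 00001101
}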
+ _PtrSize uint8 = 8 ) const ( - _N_FUNCDATA = 8 - _INVALID_FUNCDATA_OFFSET = ^uint32(0) - _FUNC_SIZE = unsafe.Sizeof(_func{}) - - _MINFUNC = 16 // minimum size for a function - _BUCKETSIZE = 256 * _MINFUNC - _SUBBUCKETS = 16 - _SUB_BUCKETSIZE = _BUCKETSIZE / _SUBBUCKETS + _N_FUNCDATA = 8 + _INVALID_FUNCDATA_OFFSET = ^uint32(0) + _FUNC_SIZE = unsafe.Sizeof(_func{}) + + _MINFUNC = 16 // minimum size for a function + _BUCKETSIZE = 256 * _MINFUNC + _SUBBUCKETS = 16 + _SUB_BUCKETSIZE = _BUCKETSIZE / _SUBBUCKETS ) // PCDATA and FUNCDATA table indexes. // // See funcdata.h and $GROOT/src/cmd/internal/objabi/funcdata.go. const ( - _FUNCDATA_ArgsPointerMaps = 0 - _FUNCDATA_LocalsPointerMaps = 1 - _FUNCDATA_StackObjects = 2 - _FUNCDATA_InlTree = 3 - _FUNCDATA_OpenCodedDeferInfo = 4 - _FUNCDATA_ArgInfo = 5 - _FUNCDATA_ArgLiveInfo = 6 - _FUNCDATA_WrapInfo = 7 + _FUNCDATA_ArgsPointerMaps = 0 + _FUNCDATA_LocalsPointerMaps = 1 + _FUNCDATA_StackObjects = 2 + _FUNCDATA_InlTree = 3 + _FUNCDATA_OpenCodedDeferInfo = 4 + _FUNCDATA_ArgInfo = 5 + _FUNCDATA_ArgLiveInfo = 6 + _FUNCDATA_WrapInfo = 7 - // ArgsSizeUnknown is set in Func.argsize to mark all functions - // whose argument size is unknown (C vararg functions, and - // assembly code without an explicit specification). - // This value is generated by the compiler, assembler, or linker. - ArgsSizeUnknown = -0x80000000 + // ArgsSizeUnknown is set in Func.argsize to mark all functions + // whose argument size is unknown (C vararg functions, and + // assembly code without an explicit specification). + // This value is generated by the compiler, assembler, or linker. + ArgsSizeUnknown = -0x80000000 ) // moduledata used to cache the funcdata and findfuncbucket of one module var moduleCache = struct { - m map[*moduledata][]byte - sync.Mutex + m map[*moduledata][]byte + sync.Mutex }{ - m: make(map[*moduledata][]byte), + m: make(map[*moduledata][]byte), } // Func contains information about a function. type Func struct { - ID uint8 // see runtime/symtab.go - Flag uint8 // see runtime/symtab.go - ArgsSize int32 // args byte size - EntryOff uint32 // start pc, offset to moduledata.text - TextSize uint32 // size of func text - DeferReturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - FileIndex uint32 // index into filetab - Name string // name of function + ID uint8 // see runtime/symtab.go + Flag uint8 // see runtime/symtab.go + ArgsSize int32 // args byte size + EntryOff uint32 // start pc, offset to moduledata.text + TextSize uint32 // size of func text + DeferReturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
+ FileIndex uint32 // index into filetab + Name string // name of function + + // PC data + Pcsp *Pcdata // PC -> SP delta + Pcfile *Pcdata // PC -> file index + Pcline *Pcdata // PC -> line number + PcUnsafePoint *Pcdata // PC -> unsafe point, must be PCDATA_UnsafePointSafe or PCDATA_UnsafePointUnsafe + PcStackMapIndex *Pcdata // PC -> stack map index, relative to ArgsPointerMaps and LocalsPointerMaps + PcInlTreeIndex *Pcdata // PC -> inlining tree index, relative to InlTree + PcArgLiveIndex *Pcdata // PC -> arg live index, relative to ArgLiveInfo - // PC data - Pcsp *Pcdata // PC -> SP delta - Pcfile *Pcdata // PC -> file index - Pcline *Pcdata // PC -> line number - PcUnsafePoint *Pcdata // PC -> unsafe point, must be PCDATA_UnsafePointSafe or PCDATA_UnsafePointUnsafe - PcStackMapIndex *Pcdata // PC -> stack map index, relative to ArgsPointerMaps and LocalsPointerMaps - PcInlTreeIndex *Pcdata // PC -> inlining tree index, relative to InlTree - PcArgLiveIndex *Pcdata // PC -> arg live index, relative to ArgLiveInfo - - // Func data, must implement encoding.BinaryMarshaler - ArgsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap - LocalsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap - StackObjects encoding.BinaryMarshaler - InlTree encoding.BinaryMarshaler - OpenCodedDeferInfo encoding.BinaryMarshaler - ArgInfo encoding.BinaryMarshaler - ArgLiveInfo encoding.BinaryMarshaler - WrapInfo encoding.BinaryMarshaler + // Func data, must implement encoding.BinaryMarshaler + ArgsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap + LocalsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap + StackObjects encoding.BinaryMarshaler + InlTree encoding.BinaryMarshaler + OpenCodedDeferInfo encoding.BinaryMarshaler + ArgInfo encoding.BinaryMarshaler + ArgLiveInfo encoding.BinaryMarshaler + WrapInfo encoding.BinaryMarshaler } func getOffsetOf(data interface{}, field string) uintptr { - t := reflect.TypeOf(data) - fv, ok := t.FieldByName(field) - if !ok { - panic(fmt.Sprintf("field %s not found in struct %s", field, t.Name())) - } - return fv.Offset + t := reflect.TypeOf(data) + fv, ok := t.FieldByName(field) + if !ok { + panic(fmt.Sprintf("field %s not found in struct %s", field, t.Name())) + } + return fv.Offset } func rnd(v int64, r int64) int64 { - if r <= 0 { - return v - } - v += r - 1 - c := v % r - if c < 0 { - c += r - } - v -= c - return v + if r <= 0 { + return v + } + v += r - 1 + c := v % r + if c < 0 { + c += r + } + v -= c + return v } var ( - byteOrder binary.ByteOrder = binary.LittleEndian + byteOrder binary.ByteOrder = binary.LittleEndian ) func funcNameParts(name string) (string, string, string) { - i := strings.IndexByte(name, '[') - if i < 0 { - return name, "", "" - } - // TODO: use LastIndexByte once the bootstrap compiler is >= Go 1.5. - j := len(name) - 1 - for j > i && name[j] != ']' { - j-- - } - if j <= i { - return name, "", "" - } - return name[:i], "[...]", name[j+1:] -} \ No newline at end of file + i := strings.IndexByte(name, '[') + if i < 0 { + return name, "", "" + } + // TODO: use LastIndexByte once the bootstrap compiler is >= Go 1.5. 
+ j := len(name) - 1 + for j > i && name[j] != ']' { + j-- + } + if j <= i { + return name, "", "" + } + return name[:i], "[...]", name[j+1:] +} diff --git a/vendor/github.com/bytedance/sonic/loader/funcdata_go115.go b/vendor/github.com/bytedance/sonic/loader/funcdata_go115.go index a2e3e65f9..1d15c99ad 100644 --- a/vendor/github.com/bytedance/sonic/loader/funcdata_go115.go +++ b/vendor/github.com/bytedance/sonic/loader/funcdata_go115.go @@ -20,137 +20,137 @@ package loader import ( - `encoding` - `os` - `unsafe` + "encoding" + "os" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) const ( - _Magic uint32 = 0xfffffffa + _Magic uint32 = 0xfffffffa ) type pcHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type moduledata struct { - pcHeader *pcHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []funcTab - findfunctab uintptr - minpc, maxpc uintptr // first func address, last func address + last func size + pcHeader *pcHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []funcTab + findfunctab uintptr + minpc, maxpc uintptr // first func address, last func address + last func size - text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - - textsectmap []textSection // see runtime/symtab.go: textAddr() - typelinks []int32 // offsets from types - itablinks []*rt.GoItab + text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr - ptab []ptabEntry + textsectmap []textSection // see runtime/symtab.go: textAddr() + typelinks []int32 // offsets from types + itablinks []*rt.GoItab - pluginpath string - pkghashes []modulehash + ptab []ptabEntry - modulename string - modulehashes []modulehash + pluginpath string + pkghashes []modulehash - hasmain uint8 // 1 if module contains the main function, 0 otherwise + modulename string + modulehashes []modulehash - gcdatamask, gcbssmask bitVector + hasmain uint8 // 1 if module contains the main function, 0 otherwise - typemap 
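funcNameParts above abbreviates generic instantiations in the func name table: everything between the first '[' and the last ']' collapses to "[...]". A simplified version using strings.LastIndexByte directly (which the TODO in the original defers for bootstrap-compiler reasons):

package main

import (
	"fmt"
	"strings"
)

// nameParts reproduces funcNameParts with the library call spelled out.
func nameParts(name string) (string, string, string) {
	i := strings.IndexByte(name, '[')
	if i < 0 {
		return name, "", ""
	}
	j := strings.LastIndexByte(name, ']')
	if j <= i {
		return name, "", ""
	}
	return name[:i], "[...]", name[j+1:]
}

func main() {
	a, b, c := nameParts("pkg.F[go.shape.int]")
	fmt.Println(a + b + c) // pkg.F[...]
}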
map[int32]*rt.GoType // offset to *_rtype in previous module + gcdatamask, gcbssmask bitVector - bad bool // module failed to load and should be ignored + typemap map[int32]*rt.GoType // offset to *_rtype in previous module - next *moduledata + bad bool // module failed to load and should be ignored + + next *moduledata } type _func struct { - entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart - nameOff int32 // function name, as index into moduledata.funcnametab. - - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - _ [2]byte // pad - nfuncdata uint8 // - - // The end of the struct is followed immediately by two variable-length - // arrays that reference the pcdata and funcdata locations for this - // function. - - // pcdata contains the offset into moduledata.pctab for the start of - // that index's table. e.g., - // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of - // the unsafe point table. - // - // An offset of 0 indicates that there is no table. - // - // pcdata [npcdata]uint32 - - // funcdata contains the offset past moduledata.gofunc which contains a - // pointer to that index's funcdata. e.g., - // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is - // the argument pointer map. - // - // An offset of ^uint32(0) indicates that there is no entry. - // - // funcdata [nfuncdata]uint32 + entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart + nameOff int32 // function name, as index into moduledata.funcnametab. + + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. + + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + _ [2]byte // pad + nfuncdata uint8 // + + // The end of the struct is followed immediately by two variable-length + // arrays that reference the pcdata and funcdata locations for this + // function. + + // pcdata contains the offset into moduledata.pctab for the start of + // that index's table. e.g., + // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of + // the unsafe point table. + // + // An offset of 0 indicates that there is no table. + // + // pcdata [npcdata]uint32 + + // funcdata contains the offset past moduledata.gofunc which contains a + // pointer to that index's funcdata. e.g., + // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is + // the argument pointer map. + // + // An offset of ^uint32(0) indicates that there is no entry. 
+ // + // funcdata [nfuncdata]uint32 } type funcTab struct { - entry uintptr - funcoff uintptr + entry uintptr + funcoff uintptr } type bitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type ptabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type textSection struct { - vaddr uintptr // prelinked section vaddr - end uintptr // vaddr + section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + end uintptr // vaddr + section length + baseaddr uintptr // relocated section address } type modulehash struct { - modulename string - linktimehash string - runtimehash *string + modulename string + linktimehash string + runtimehash *string } // findfuncbucket is an array of these structures. @@ -162,388 +162,395 @@ type modulehash struct { // index to find the target function. // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead. type findfuncbucket struct { - idx uint32 - _SUBBUCKETS [16]byte + idx uint32 + _SUBBUCKETS [16]byte } - type compilationUnit struct { - fileNames []string + fileNames []string } -// func name table format: -// nameOff[0] -> namePartA namePartB namePartC \x00 -// nameOff[1] -> namePartA namePartB namePartC \x00 -// ... +// func name table format: +// +// nameOff[0] -> namePartA namePartB namePartC \x00 +// nameOff[1] -> namePartA namePartB namePartC \x00 +// ... func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) { - offs = make([]int32, len(funcs)) - offset := 0 + offs = make([]int32, len(funcs)) + offset := 0 - for i, f := range funcs { - offs[i] = int32(offset) + for i, f := range funcs { + offs[i] = int32(offset) - a, b, c := funcNameParts(f.Name) - tab = append(tab, a...) - tab = append(tab, b...) - tab = append(tab, c...) - tab = append(tab, 0) - offset += len(a) + len(b) + len(c) + 1 - } + a, b, c := funcNameParts(f.Name) + tab = append(tab, a...) + tab = append(tab, b...) + tab = append(tab, c...) + tab = append(tab, 0) + offset += len(a) + len(b) + len(c) + 1 + } - return + return } // CU table format: -// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] -// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] -// ... +// +// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] +// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] +// ... // // file name table format: -// filetabOffset[0] -> CUs[0].fileNames[0] \x00 -// ... -// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 -// ... -// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 +// +// filetabOffset[0] -> CUs[0].fileNames[0] \x00 +// ... +// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 +// ... 
+// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) { - cuOffsets = make([]uint32, len(cus)) - cuOffset := 0 - fileOffset := 0 + cuOffsets = make([]uint32, len(cus)) + cuOffset := 0 + fileOffset := 0 - for i, cu := range cus { - cuOffsets[i] = uint32(cuOffset) + for i, cu := range cus { + cuOffsets[i] = uint32(cuOffset) - for _, name := range cu.fileNames { - cutab = append(cutab, uint32(fileOffset)) + for _, name := range cu.fileNames { + cutab = append(cutab, uint32(fileOffset)) - fileOffset += len(name) + 1 - filetab = append(filetab, name...) - filetab = append(filetab, 0) - } + fileOffset += len(name) + 1 + filetab = append(filetab, name...) + filetab = append(filetab, 0) + } - cuOffset += len(cu.fileNames) - } + cuOffset += len(cu.fileNames) + } - return + return } func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) { - fstart = len(*out) - *out = append(*out, byte(0)) - offs := uint32(1) - - funcdataOffs = make([][]uint32, len(funcs)) - for i, f := range funcs { - - var writer = func(fd encoding.BinaryMarshaler) { - var ab []byte - var err error - if fd != nil { - ab, err = fd.MarshalBinary() - if err != nil { - panic(err) - } - funcdataOffs[i] = append(funcdataOffs[i], offs) - } else { - ab = []byte{0} - funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) - } - *out = append(*out, ab...) - offs += uint32(len(ab)) - } - - writer(f.ArgsPointerMaps) - writer(f.LocalsPointerMaps) - writer(f.StackObjects) - writer(f.InlTree) - writer(f.OpenCodedDeferInfo) - writer(f.ArgInfo) - writer(f.ArgLiveInfo) - writer(f.WrapInfo) - } - return + fstart = len(*out) + *out = append(*out, byte(0)) + offs := uint32(1) + + funcdataOffs = make([][]uint32, len(funcs)) + for i, f := range funcs { + + var writer = func(fd encoding.BinaryMarshaler) { + var ab []byte + var err error + if fd != nil { + ab, err = fd.MarshalBinary() + if err != nil { + panic(err) + } + funcdataOffs[i] = append(funcdataOffs[i], offs) + } else { + ab = []byte{0} + funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) + } + *out = append(*out, ab...) + offs += uint32(len(ab)) + } + + writer(f.ArgsPointerMaps) + writer(f.LocalsPointerMaps) + writer(f.StackObjects) + writer(f.InlTree) + writer(f.OpenCodedDeferInfo) + writer(f.ArgInfo) + writer(f.ArgLiveInfo) + writer(f.WrapInfo) + } + return } func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab, pclntabSize int64, startLocations []uint32) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. - pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize)) - startLocations = make([]uint32, len(funcs)) - for i, f := range funcs { - pclntabSize = rnd(pclntabSize, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(pclntabSize) - pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4) - } - ftab = make([]funcTab, 0, len(funcs)+1) - - // write a map of pc->func info offsets - for i, f := range funcs { - ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])}) - } - - // Final entry of table is just end pc offset. 
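Aside: the funcname and filename tables described by the format comments above are plain NUL-separated string pools indexed by byte offset, which is all makeFuncnameTab and makeFilenametab are constructing. A small self-contained sketch of that encoding (helper names here are illustrative, not from the patch):

package main

import "fmt"

// buildTab packs names into one NUL-separated byte table and records
// each name's starting offset, matching the layout described above.
func buildTab(names []string) (tab []byte, offs []int32) {
	offs = make([]int32, len(names))
	for i, n := range names {
		offs[i] = int32(len(tab))
		tab = append(tab, n...)
		tab = append(tab, 0) // NUL terminator
	}
	return
}

// readAt recovers the NUL-terminated name starting at off.
func readAt(tab []byte, off int32) string {
	end := off
	for tab[end] != 0 {
		end++
	}
	return string(tab[off:end])
}

func main() {
	tab, offs := buildTab([]string{"main.main", "main.(*T).Get", "a.go"})
	fmt.Println(offs)                 // [0 10 24]
	fmt.Println(readAt(tab, offs[1])) // main.(*T).Get
}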
- lastFunc := funcs[len(funcs)-1] - ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0}) - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize)) + startLocations = make([]uint32, len(funcs)) + for i, f := range funcs { + pclntabSize = rnd(pclntabSize, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(pclntabSize) + pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4) + } + ftab = make([]funcTab, 0, len(funcs)+1) + + // write a map of pc->func info offsets + for i, f := range funcs { + ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])}) + } + + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0}) + + return } // Pcln table format: [...]funcTab + [...]_Func func makePclntable(size int64, startLocations []uint32, funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) { - pclntab = make([]byte, size, size) - - // write a map of pc->func info offsets - offs := 0 - for i, f := range funcs { - byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry)) - byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i])) - offs += 16 - } - // Final entry of table is just end pc offset. - lastFunc := funcs[len(funcs)-1] - byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize)) - offs += 8 - - // write func info table - for i, f := range funcs { - off := startLocations[i] - - // write _func structure to pclntab - byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry)) - off += 8 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset)) - off += 4 - pclntab[off] = f.funcID - // NOTICE: _[2]byte alignment - off += 3 - pclntab[off] = f.nfuncdata - off += 1 - - // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 - for j := 3; j < len(pcdataOffs[i]); j++ { - byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) - off += 4 - } - - off = uint32(rnd(int64(off), int64(_PtrSize))) - - // funcdata refs as offsets from gofunc - for _, funcdata := range funcdataOffs[i] { - if funcdata == _INVALID_FUNCDATA_OFFSET { - byteOrder.PutUint64(pclntab[off:off+8], 0) - } else { - byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata)) - } - off += 8 - } - } - - return + pclntab = make([]byte, size, size) + + // write a map of pc->func info offsets + offs := 0 + for i, f := range funcs { + byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry)) + byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i])) + offs += 16 + } + // Final entry of table is just end pc offset. 
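Aside: makeFtab's sizing pass lays the pclntab out as one (entry, funcoff) pair per function plus a trailing end-PC slot, with each variable-length _func record placed at the next pointer-aligned offset; makePclntable then fills those slots in. A compact sketch of that layout arithmetic, with made-up record sizes standing in for _FUNC_SIZE + nfuncdata*_PtrSize + npcdata*4:

package main

import "fmt"

const ptrSize = 8

func align(v, r int64) int64 { return (v + r - 1) / r * r }

// layout returns each record's start offset and the total table size:
// the pair table and end-PC slot come first, then aligned records.
func layout(recordSizes []int64) (starts []int64, total int64) {
	total = int64(len(recordSizes))*2*ptrSize + ptrSize
	starts = make([]int64, len(recordSizes))
	for i, sz := range recordSizes {
		total = align(total, ptrSize)
		starts[i] = total
		total += sz
	}
	return
}

func main() {
	starts, total := layout([]int64{44, 52}) // two hypothetical func records
	fmt.Println(starts, total)               // [40 88] 140
}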
+ lastFunc := funcs[len(funcs)-1] + byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize)) + offs += 8 + + // write func info table + for i, f := range funcs { + off := startLocations[i] + + // write _func structure to pclntab + byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry)) + off += 8 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset)) + off += 4 + pclntab[off] = f.funcID + // NOTICE: _[2]byte alignment + off += 3 + pclntab[off] = f.nfuncdata + off += 1 + + // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 + for j := 3; j < len(pcdataOffs[i]); j++ { + byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) + off += 4 + } + + off = uint32(rnd(int64(off), int64(_PtrSize))) + + // funcdata refs as offsets from gofunc + for _, funcdata := range funcdataOffs[i] { + if funcdata == _INVALID_FUNCDATA_OFFSET { + byteOrder.PutUint64(pclntab[off:off+8], 0) + } else { + byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata)) + } + off += 8 + } + } + + return } -// findfunc table used to map pc to belonging func, +// findfunc table used to map pc to belonging func, // returns the index in the func table. // // All text section are divided into buckets sized _BUCKETSIZE(4K): -// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), -// and it has a base idx to plus the offset stored in jth subbucket. +// +// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), +// and it has a base idx to plus the offset stored in jth subbucket. +// // see findfunc() in runtime/symtab.go func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) { - start = len(*out) - - max := ftab[len(ftab)-1].entry - min := ftab[0].entry - nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE - n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE - - tab := make([]findfuncbucket, 0, nbuckets) - var s, e = 0, 0 - for i := 0; i<int(nbuckets); i++ { - var pc = min + uintptr((i+1)*_BUCKETSIZE) - // find the end func of the bucket - for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {} - // store the start func of the bucket - var fb = findfuncbucket{idx: uint32(s)} - - for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ { - pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE) - var ss = s - // find the end func of the subbucket - for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {} - // store the start func of the subbucket - fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) - s = ss - } - s = e - tab = append(tab, fb) - } - - // write findfuncbucket - if len(tab) > 0 { - size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab) - *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) 
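Aside: writeFindfunctab builds the classic two-level pc->func index the comment above describes: each bucket stores a base func index plus one delta byte per sub-bucket, and a lookup finishes with a short forward scan. A simplified, self-contained sketch (constants and names are illustrative; the real code emits raw findfuncbucket structs):

package main

import "fmt"

const (
	bucketSize    = 4096
	subBuckets    = 16
	subBucketSize = bucketSize / subBuckets
)

type bucket struct {
	idx  uint32
	subs [subBuckets]byte
}

// build takes sorted function start PCs (relative to text start) and
// records, per sub-bucket, the first function covering its start.
func build(entries []uint32, limit uint32) []bucket {
	var out []bucket
	f := 0
	for base := uint32(0); base < limit; base += bucketSize {
		for f+1 < len(entries) && entries[f+1] <= base {
			f++
		}
		b := bucket{idx: uint32(f)}
		for j := 0; j < subBuckets; j++ {
			start := base + uint32(j)*subBucketSize
			g := f
			for g+1 < len(entries) && entries[g+1] <= start {
				g++
			}
			b.subs[j] = byte(uint32(g) - b.idx)
		}
		out = append(out, b)
	}
	return out
}

// lookup narrows via the table, then finishes with a short forward
// scan, as the runtime's findfunc does.
func lookup(tab []bucket, entries []uint32, pc uint32) int {
	b := tab[pc/bucketSize]
	i := int(b.idx) + int(b.subs[(pc%bucketSize)/subBucketSize])
	for i+1 < len(entries) && entries[i+1] <= pc {
		i++
	}
	return i
}

func main() {
	entries := []uint32{0, 300, 4100, 5000} // hypothetical func entries
	tab := build(entries, 8192)
	fmt.Println(lookup(tab, entries, 350), lookup(tab, entries, 4200)) // 1 2
}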
- } - return + start = len(*out) + + max := ftab[len(ftab)-1].entry + min := ftab[0].entry + nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE + n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE + + tab := make([]findfuncbucket, 0, nbuckets) + var s, e = 0, 0 + for i := 0; i < int(nbuckets); i++ { + var pc = min + uintptr((i+1)*_BUCKETSIZE) + // find the end func of the bucket + for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ { + } + // store the start func of the bucket + var fb = findfuncbucket{idx: uint32(s)} + + for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ { + pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE) + var ss = s + // find the end func of the subbucket + for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ { + } + // store the start func of the subbucket + fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) + s = ss + } + s = e + tab = append(tab, fb) + } + + // write findfuncbucket + if len(tab) > 0 { + size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab) + *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) + } + return } func makeModuledata(name string, filenames []string, funcs []Func, text []byte) (mod *moduledata) { - mod = new(moduledata) - mod.modulename = name - - // make filename table - cu := make([]string, 0, len(filenames)) - for _, f := range filenames { - cu = append(cu, f) - } - cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) - mod.cutab = cutab - mod.filetab = filetab - - // make funcname table - funcnametab, nameOffs := makeFuncnameTab(funcs) - mod.funcnametab = funcnametab - - // mmap() text and funcdata segements - p := os.Getpagesize() - size := int(rnd(int64(len(text)), int64(p))) - addr := mmap(size) - // copy the machine code - s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) - copy(s, text) - // make it executable - mprotect(addr, size) - - // make pcdata table - // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata - pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs) - mod.pctab = pctab - - // write func data - // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata - // TODO: estimate accurate capacity - cache := make([]byte, 0, len(funcs)*int(_PtrSize)) - fstart, funcdataOffs := writeFuncdata(&cache, funcs) - - // make pc->func (binary search) func table - lastFuncsize := funcs[len(funcs)-1].TextSize - ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize) - mod.ftab = ftab - - // write pc->func (modmap) findfunc table - ffstart := writeFindfunctab(&cache, ftab) - - // cache funcdata and findfuncbucket - moduleCache.Lock() - moduleCache.m[mod] = cache - moduleCache.Unlock() - mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart)) - funcdataAddr := uintptr(rt.IndexByte(cache, fstart)) - - // make pclnt table - pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs) - mod.pclntable = pclntab - - // assign addresses - mod.text = addr - mod.etext = addr + uintptr(size) - mod.minpc = addr - mod.maxpc = addr + uintptr(len(text)) - - // make pc header - mod.pcHeader = &pcHeader { - magic : _Magic, - minLC : _MinLC, - ptrSize : _PtrSize, - nfunc : len(funcs), - nfiles: uint(len(cu)), - funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), - cuOffset: getOffsetOf(moduledata{}, "cutab"), - filetabOffset: getOffsetOf(moduledata{}, "filetab"), - pctabOffset: getOffsetOf(moduledata{}, "pctab"), - pclnOffset: 
getOffsetOf(moduledata{}, "pclntable"), - } - - // sepecial case: gcdata and gcbss must by non-empty - mod.gcdata = uintptr(unsafe.Pointer(&emptyByte)) - mod.gcbss = uintptr(unsafe.Pointer(&emptyByte)) - - return + mod = new(moduledata) + mod.modulename = name + + // make filename table + cu := make([]string, 0, len(filenames)) + for _, f := range filenames { + cu = append(cu, f) + } + cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) + mod.cutab = cutab + mod.filetab = filetab + + // make funcname table + funcnametab, nameOffs := makeFuncnameTab(funcs) + mod.funcnametab = funcnametab + + // mmap() text and funcdata segments + p := os.Getpagesize() + size := int(rnd(int64(len(text)), int64(p))) + addr := mmap(size) + // copy the machine code + s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) + copy(s, text) + // make it executable + mprotect(addr, size) + + // make pcdata table + // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata + pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs) + mod.pctab = pctab + + // write func data + // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata + // TODO: estimate accurate capacity + cache := make([]byte, 0, len(funcs)*int(_PtrSize)) + fstart, funcdataOffs := writeFuncdata(&cache, funcs) + + // make pc->func (binary search) func table + lastFuncsize := funcs[len(funcs)-1].TextSize + ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize) + mod.ftab = ftab + + // write pc->func (modmap) findfunc table + ffstart := writeFindfunctab(&cache, ftab) + + // cache funcdata and findfuncbucket + moduleCache.Lock() + moduleCache.m[mod] = cache + moduleCache.Unlock() + mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart)) + funcdataAddr := uintptr(rt.IndexByte(cache, fstart)) + + // make pclnt table + pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs) + mod.pclntable = pclntab + + // assign addresses + mod.text = addr + mod.etext = addr + uintptr(size) + mod.minpc = addr + mod.maxpc = addr + uintptr(len(text)) + + // make pc header + mod.pcHeader = &pcHeader{ + magic: _Magic, + minLC: _MinLC, + ptrSize: _PtrSize, + nfunc: len(funcs), + nfiles: uint(len(cu)), + funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), + cuOffset: getOffsetOf(moduledata{}, "cutab"), + filetabOffset: getOffsetOf(moduledata{}, "filetab"), + pctabOffset: getOffsetOf(moduledata{}, "pctab"), + pclnOffset: getOffsetOf(moduledata{}, "pclntable"), + } + + // special case: gcdata and gcbss must be non-empty + mod.gcdata = uintptr(unsafe.Pointer(&emptyByte)) + mod.gcbss = uintptr(unsafe.Pointer(&emptyByte)) + + return } // makePctab generates pcdelta->valuedelta tables for functions, // and returns the table and the entry offset of every kind pcdata in the table. func makePctab(funcs []Func, addr uintptr, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) { - _funcs = make([]_func, len(funcs)) - - // Pctab offsets of 0 are considered invalid in the runtime. We respect - // that by just padding a single byte at the beginning of runtime.pctab, - // that way no real offsets can be zero.
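Aside: makePctab reserves pctab offset 0 as "no table" by padding one byte at the front, and each real table's offset must be captured before its bytes are appended, which is why the _f.pcsp/_f.pcfile/_f.pcln assignments below precede the writer calls. A simplified sketch of the pattern; the real code also appends a placeholder byte and records a sentinel constant for missing tables:

package main

import "fmt"

func main() {
	pctab := make([]byte, 1) // pad byte, so no real offset can be 0

	appendTable := func(data []byte) uint32 {
		if data == nil {
			return 0 // "this function has no such table"
		}
		off := uint32(len(pctab)) // record start before appending
		pctab = append(pctab, data...)
		return off
	}

	pcsp := appendTable([]byte{0x02, 0x04}) // hypothetical encoded deltas
	pcfile := appendTable(nil)
	pcline := appendTable([]byte{0x06})
	fmt.Println(pcsp, pcfile, pcline) // 1 0 3
}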
- pctab = make([]byte, 1, 12*len(funcs)+1) - pcdataOffs = make([][]uint32, len(funcs)) - - for i, f := range funcs { - _f := &_funcs[i] - - var writer = func(pc *Pcdata) { - var ab []byte - var err error - if pc != nil { - ab, err = pc.MarshalBinary() - if err != nil { - panic(err) - } - pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) - } else { - ab = []byte{0} - pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) - } - pctab = append(pctab, ab...) - } - - if f.Pcsp != nil { - _f.pcsp = uint32(len(pctab)) - } - writer(f.Pcsp) - if f.Pcfile != nil { - _f.pcfile = uint32(len(pctab)) - } - writer(f.Pcfile) - if f.Pcline != nil { - _f.pcln = uint32(len(pctab)) - } - writer(f.Pcline) - writer(f.PcUnsafePoint) - writer(f.PcStackMapIndex) - writer(f.PcInlTreeIndex) - writer(f.PcArgLiveIndex) - - _f.entry = addr + uintptr(f.EntryOff) - _f.nameOff = nameOffset[i] - _f.args = f.ArgsSize - _f.deferreturn = f.DeferReturn - // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] - _f.npcdata = uint32(_N_PCDATA) - _f.cuOffset = cuOffset[i] - _f.funcID = f.ID - _f.nfuncdata = uint8(_N_FUNCDATA) - } - - return + _funcs = make([]_func, len(funcs)) + + // Pctab offsets of 0 are considered invalid in the runtime. We respect + // that by just padding a single byte at the beginning of runtime.pctab, + // that way no real offsets can be zero. + pctab = make([]byte, 1, 12*len(funcs)+1) + pcdataOffs = make([][]uint32, len(funcs)) + + for i, f := range funcs { + _f := &_funcs[i] + + var writer = func(pc *Pcdata) { + var ab []byte + var err error + if pc != nil { + ab, err = pc.MarshalBinary() + if err != nil { + panic(err) + } + pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) + } else { + ab = []byte{0} + pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) + } + pctab = append(pctab, ab...) 
+ } + + if f.Pcsp != nil { + _f.pcsp = uint32(len(pctab)) + } + writer(f.Pcsp) + if f.Pcfile != nil { + _f.pcfile = uint32(len(pctab)) + } + writer(f.Pcfile) + if f.Pcline != nil { + _f.pcln = uint32(len(pctab)) + } + writer(f.Pcline) + writer(f.PcUnsafePoint) + writer(f.PcStackMapIndex) + writer(f.PcInlTreeIndex) + writer(f.PcArgLiveIndex) + + _f.entry = addr + uintptr(f.EntryOff) + _f.nameOff = nameOffset[i] + _f.args = f.ArgsSize + _f.deferreturn = f.DeferReturn + // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] + _f.npcdata = uint32(_N_PCDATA) + _f.cuOffset = cuOffset[i] + _f.funcID = f.ID + _f.nfuncdata = uint8(_N_FUNCDATA) + } + + return } -func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {} \ No newline at end of file +func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) { +} diff --git a/vendor/github.com/bytedance/sonic/loader/funcdata_go116.go b/vendor/github.com/bytedance/sonic/loader/funcdata_go116.go index 508268e7d..bc328434a 100644 --- a/vendor/github.com/bytedance/sonic/loader/funcdata_go116.go +++ b/vendor/github.com/bytedance/sonic/loader/funcdata_go116.go @@ -20,137 +20,137 @@ package loader import ( - `encoding` - `os` - `unsafe` + "encoding" + "os" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) const ( - _Magic uint32 = 0xfffffffa + _Magic uint32 = 0xfffffffa ) type pcHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type moduledata struct { - pcHeader *pcHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []funcTab - findfunctab uintptr - minpc, maxpc uintptr // first func address, last func address + last func size + pcHeader *pcHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []funcTab + findfunctab uintptr + minpc, maxpc uintptr // first func address, last func address + last func size - text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - - textsectmap []textSection // see runtime/symtab.go: textAddr() - 
typelinks []int32 // offsets from types - itablinks []*rt.GoItab + text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr - ptab []ptabEntry + textsectmap []textSection // see runtime/symtab.go: textAddr() + typelinks []int32 // offsets from types + itablinks []*rt.GoItab - pluginpath string - pkghashes []modulehash + ptab []ptabEntry - modulename string - modulehashes []modulehash + pluginpath string + pkghashes []modulehash - hasmain uint8 // 1 if module contains the main function, 0 otherwise + modulename string + modulehashes []modulehash - gcdatamask, gcbssmask bitVector + hasmain uint8 // 1 if module contains the main function, 0 otherwise - typemap map[int32]*rt.GoType // offset to *_rtype in previous module + gcdatamask, gcbssmask bitVector - bad bool // module failed to load and should be ignored + typemap map[int32]*rt.GoType // offset to *_rtype in previous module - next *moduledata + bad bool // module failed to load and should be ignored + + next *moduledata } type _func struct { - entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart - nameOff int32 // function name, as index into moduledata.funcnametab. - - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - _ [2]byte // pad - nfuncdata uint8 // - - // The end of the struct is followed immediately by two variable-length - // arrays that reference the pcdata and funcdata locations for this - // function. - - // pcdata contains the offset into moduledata.pctab for the start of - // that index's table. e.g., - // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of - // the unsafe point table. - // - // An offset of 0 indicates that there is no table. - // - // pcdata [npcdata]uint32 - - // funcdata contains the offset past moduledata.gofunc which contains a - // pointer to that index's funcdata. e.g., - // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is - // the argument pointer map. - // - // An offset of ^uint32(0) indicates that there is no entry. - // - // funcdata [nfuncdata]uint32 + entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart + nameOff int32 // function name, as index into moduledata.funcnametab. + + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. + + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + _ [2]byte // pad + nfuncdata uint8 // + + // The end of the struct is followed immediately by two variable-length + // arrays that reference the pcdata and funcdata locations for this + // function. + + // pcdata contains the offset into moduledata.pctab for the start of + // that index's table. e.g., + // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of + // the unsafe point table. + // + // An offset of 0 indicates that there is no table. 
+ // + // pcdata [npcdata]uint32 + + // funcdata contains the offset past moduledata.gofunc which contains a + // pointer to that index's funcdata. e.g., + // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is + // the argument pointer map. + // + // An offset of ^uint32(0) indicates that there is no entry. + // + // funcdata [nfuncdata]uint32 } type funcTab struct { - entry uintptr - funcoff uintptr + entry uintptr + funcoff uintptr } type bitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type ptabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type textSection struct { - vaddr uintptr // prelinked section vaddr - end uintptr // vaddr + section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + end uintptr // vaddr + section length + baseaddr uintptr // relocated section address } type modulehash struct { - modulename string - linktimehash string - runtimehash *string + modulename string + linktimehash string + runtimehash *string } // findfuncbucket is an array of these structures. @@ -162,388 +162,395 @@ type modulehash struct { // index to find the target function. // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead. type findfuncbucket struct { - idx uint32 - _SUBBUCKETS [16]byte + idx uint32 + _SUBBUCKETS [16]byte } - type compilationUnit struct { - fileNames []string + fileNames []string } -// func name table format: -// nameOff[0] -> namePartA namePartB namePartC \x00 -// nameOff[1] -> namePartA namePartB namePartC \x00 -// ... +// func name table format: +// +// nameOff[0] -> namePartA namePartB namePartC \x00 +// nameOff[1] -> namePartA namePartB namePartC \x00 +// ... func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) { - offs = make([]int32, len(funcs)) - offset := 0 + offs = make([]int32, len(funcs)) + offset := 0 - for i, f := range funcs { - offs[i] = int32(offset) + for i, f := range funcs { + offs[i] = int32(offset) - a, b, c := funcNameParts(f.Name) - tab = append(tab, a...) - tab = append(tab, b...) - tab = append(tab, c...) - tab = append(tab, 0) - offset += len(a) + len(b) + len(c) + 1 - } + a, b, c := funcNameParts(f.Name) + tab = append(tab, a...) + tab = append(tab, b...) + tab = append(tab, c...) + tab = append(tab, 0) + offset += len(a) + len(b) + len(c) + 1 + } - return + return } // CU table format: -// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] -// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] -// ... +// +// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] +// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] +// ... // // file name table format: -// filetabOffset[0] -> CUs[0].fileNames[0] \x00 -// ... -// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 -// ... -// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 +// +// filetabOffset[0] -> CUs[0].fileNames[0] \x00 +// ... +// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 +// ... 
+// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) { - cuOffsets = make([]uint32, len(cus)) - cuOffset := 0 - fileOffset := 0 + cuOffsets = make([]uint32, len(cus)) + cuOffset := 0 + fileOffset := 0 - for i, cu := range cus { - cuOffsets[i] = uint32(cuOffset) + for i, cu := range cus { + cuOffsets[i] = uint32(cuOffset) - for _, name := range cu.fileNames { - cutab = append(cutab, uint32(fileOffset)) + for _, name := range cu.fileNames { + cutab = append(cutab, uint32(fileOffset)) - fileOffset += len(name) + 1 - filetab = append(filetab, name...) - filetab = append(filetab, 0) - } + fileOffset += len(name) + 1 + filetab = append(filetab, name...) + filetab = append(filetab, 0) + } - cuOffset += len(cu.fileNames) - } + cuOffset += len(cu.fileNames) + } - return + return } func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) { - fstart = len(*out) - *out = append(*out, byte(0)) - offs := uint32(1) - - funcdataOffs = make([][]uint32, len(funcs)) - for i, f := range funcs { - - var writer = func(fd encoding.BinaryMarshaler) { - var ab []byte - var err error - if fd != nil { - ab, err = fd.MarshalBinary() - if err != nil { - panic(err) - } - funcdataOffs[i] = append(funcdataOffs[i], offs) - } else { - ab = []byte{0} - funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) - } - *out = append(*out, ab...) - offs += uint32(len(ab)) - } - - writer(f.ArgsPointerMaps) - writer(f.LocalsPointerMaps) - writer(f.StackObjects) - writer(f.InlTree) - writer(f.OpenCodedDeferInfo) - writer(f.ArgInfo) - writer(f.ArgLiveInfo) - writer(f.WrapInfo) - } - return + fstart = len(*out) + *out = append(*out, byte(0)) + offs := uint32(1) + + funcdataOffs = make([][]uint32, len(funcs)) + for i, f := range funcs { + + var writer = func(fd encoding.BinaryMarshaler) { + var ab []byte + var err error + if fd != nil { + ab, err = fd.MarshalBinary() + if err != nil { + panic(err) + } + funcdataOffs[i] = append(funcdataOffs[i], offs) + } else { + ab = []byte{0} + funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) + } + *out = append(*out, ab...) + offs += uint32(len(ab)) + } + + writer(f.ArgsPointerMaps) + writer(f.LocalsPointerMaps) + writer(f.StackObjects) + writer(f.InlTree) + writer(f.OpenCodedDeferInfo) + writer(f.ArgInfo) + writer(f.ArgLiveInfo) + writer(f.WrapInfo) + } + return } func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab, pclntabSize int64, startLocations []uint32) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. - pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize)) - startLocations = make([]uint32, len(funcs)) - for i, f := range funcs { - pclntabSize = rnd(pclntabSize, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(pclntabSize) - pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4) - } - ftab = make([]funcTab, 0, len(funcs)+1) - - // write a map of pc->func info offsets - for i, f := range funcs { - ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])}) - } - - // Final entry of table is just end pc offset. 
- lastFunc := funcs[len(funcs)-1] - ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0}) - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize)) + startLocations = make([]uint32, len(funcs)) + for i, f := range funcs { + pclntabSize = rnd(pclntabSize, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(pclntabSize) + pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4) + } + ftab = make([]funcTab, 0, len(funcs)+1) + + // write a map of pc->func info offsets + for i, f := range funcs { + ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])}) + } + + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0}) + + return } // Pcln table format: [...]funcTab + [...]_Func func makePclntable(size int64, startLocations []uint32, funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) { - pclntab = make([]byte, size, size) - - // write a map of pc->func info offsets - offs := 0 - for i, f := range funcs { - byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry)) - byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i])) - offs += 16 - } - // Final entry of table is just end pc offset. - lastFunc := funcs[len(funcs)-1] - byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize)) - offs += 8 - - // write func info table - for i, f := range funcs { - off := startLocations[i] - - // write _func structure to pclntab - byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry)) - off += 8 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata)) - off += 4 - byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset)) - off += 4 - pclntab[off] = f.funcID - // NOTICE: _[2]byte alignment - off += 3 - pclntab[off] = f.nfuncdata - off += 1 - - // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 - for j := 3; j < len(pcdataOffs[i]); j++ { - byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) - off += 4 - } - - off = uint32(rnd(int64(off), int64(_PtrSize))) - - // funcdata refs as offsets from gofunc - for _, funcdata := range funcdataOffs[i] { - if funcdata == _INVALID_FUNCDATA_OFFSET { - byteOrder.PutUint64(pclntab[off:off+8], 0) - } else { - byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata)) - } - off += 8 - } - } - - return + pclntab = make([]byte, size, size) + + // write a map of pc->func info offsets + offs := 0 + for i, f := range funcs { + byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry)) + byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i])) + offs += 16 + } + // Final entry of table is just end pc offset. 
+ lastFunc := funcs[len(funcs)-1] + byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize)) + offs += 8 + + // write func info table + for i, f := range funcs { + off := startLocations[i] + + // write _func structure to pclntab + byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry)) + off += 8 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata)) + off += 4 + byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset)) + off += 4 + pclntab[off] = f.funcID + // NOTICE: _[2]byte alignment + off += 3 + pclntab[off] = f.nfuncdata + off += 1 + + // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 + for j := 3; j < len(pcdataOffs[i]); j++ { + byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) + off += 4 + } + + off = uint32(rnd(int64(off), int64(_PtrSize))) + + // funcdata refs as offsets from gofunc + for _, funcdata := range funcdataOffs[i] { + if funcdata == _INVALID_FUNCDATA_OFFSET { + byteOrder.PutUint64(pclntab[off:off+8], 0) + } else { + byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata)) + } + off += 8 + } + } + + return } -// findfunc table used to map pc to belonging func, +// findfunc table used to map pc to belonging func, // returns the index in the func table. // // All text section are divided into buckets sized _BUCKETSIZE(4K): -// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), -// and it has a base idx to plus the offset stored in jth subbucket. +// +// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), +// and it has a base idx to plus the offset stored in jth subbucket. +// // see findfunc() in runtime/symtab.go func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) { - start = len(*out) - - max := ftab[len(ftab)-1].entry - min := ftab[0].entry - nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE - n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE - - tab := make([]findfuncbucket, 0, nbuckets) - var s, e = 0, 0 - for i := 0; i<int(nbuckets); i++ { - var pc = min + uintptr((i+1)*_BUCKETSIZE) - // find the end func of the bucket - for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {} - // store the start func of the bucket - var fb = findfuncbucket{idx: uint32(s)} - - for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ { - pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE) - var ss = s - // find the end func of the subbucket - for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {} - // store the start func of the subbucket - fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) - s = ss - } - s = e - tab = append(tab, fb) - } - - // write findfuncbucket - if len(tab) > 0 { - size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab) - *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) 
- } - return + start = len(*out) + + max := ftab[len(ftab)-1].entry + min := ftab[0].entry + nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE + n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE + + tab := make([]findfuncbucket, 0, nbuckets) + var s, e = 0, 0 + for i := 0; i < int(nbuckets); i++ { + var pc = min + uintptr((i+1)*_BUCKETSIZE) + // find the end func of the bucket + for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ { + } + // store the start func of the bucket + var fb = findfuncbucket{idx: uint32(s)} + + for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ { + pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE) + var ss = s + // find the end func of the subbucket + for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ { + } + // store the start func of the subbucket + fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) + s = ss + } + s = e + tab = append(tab, fb) + } + + // write findfuncbucket + if len(tab) > 0 { + size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab) + *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) + } + return } func makeModuledata(name string, filenames []string, funcs []Func, text []byte) (mod *moduledata) { - mod = new(moduledata) - mod.modulename = name - - // make filename table - cu := make([]string, 0, len(filenames)) - for _, f := range filenames { - cu = append(cu, f) - } - cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) - mod.cutab = cutab - mod.filetab = filetab - - // make funcname table - funcnametab, nameOffs := makeFuncnameTab(funcs) - mod.funcnametab = funcnametab - - // mmap() text and funcdata segements - p := os.Getpagesize() - size := int(rnd(int64(len(text)), int64(p))) - addr := mmap(size) - // copy the machine code - s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) - copy(s, text) - // make it executable - mprotect(addr, size) - - // make pcdata table - // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata - pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs) - mod.pctab = pctab - - // write func data - // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata - // TODO: estimate accurate capacity - cache := make([]byte, 0, len(funcs)*int(_PtrSize)) - fstart, funcdataOffs := writeFuncdata(&cache, funcs) - - // make pc->func (binary search) func table - lastFuncsize := funcs[len(funcs)-1].TextSize - ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize) - mod.ftab = ftab - - // write pc->func (modmap) findfunc table - ffstart := writeFindfunctab(&cache, ftab) - - // cache funcdata and findfuncbucket - moduleCache.Lock() - moduleCache.m[mod] = cache - moduleCache.Unlock() - mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart)) - funcdataAddr := uintptr(rt.IndexByte(cache, fstart)) - - // make pclnt table - pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs) - mod.pclntable = pclntab - - // assign addresses - mod.text = addr - mod.etext = addr + uintptr(size) - mod.minpc = addr - mod.maxpc = addr + uintptr(len(text)) - - // make pc header - mod.pcHeader = &pcHeader { - magic : _Magic, - minLC : _MinLC, - ptrSize : _PtrSize, - nfunc : len(funcs), - nfiles: uint(len(cu)), - funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), - cuOffset: getOffsetOf(moduledata{}, "cutab"), - filetabOffset: getOffsetOf(moduledata{}, "filetab"), - pctabOffset: getOffsetOf(moduledata{}, "pctab"), - pclnOffset: 
getOffsetOf(moduledata{}, "pclntable"), - } - - // sepecial case: gcdata and gcbss must by non-empty - mod.gcdata = uintptr(unsafe.Pointer(&emptyByte)) - mod.gcbss = uintptr(unsafe.Pointer(&emptyByte)) - - return + mod = new(moduledata) + mod.modulename = name + + // make filename table + cu := make([]string, 0, len(filenames)) + for _, f := range filenames { + cu = append(cu, f) + } + cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) + mod.cutab = cutab + mod.filetab = filetab + + // make funcname table + funcnametab, nameOffs := makeFuncnameTab(funcs) + mod.funcnametab = funcnametab + + // mmap() text and funcdata segments + p := os.Getpagesize() + size := int(rnd(int64(len(text)), int64(p))) + addr := mmap(size) + // copy the machine code + s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) + copy(s, text) + // make it executable + mprotect(addr, size) + + // make pcdata table + // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata + pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs) + mod.pctab = pctab + + // write func data + // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata + // TODO: estimate accurate capacity + cache := make([]byte, 0, len(funcs)*int(_PtrSize)) + fstart, funcdataOffs := writeFuncdata(&cache, funcs) + + // make pc->func (binary search) func table + lastFuncsize := funcs[len(funcs)-1].TextSize + ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize) + mod.ftab = ftab + + // write pc->func (modmap) findfunc table + ffstart := writeFindfunctab(&cache, ftab) + + // cache funcdata and findfuncbucket + moduleCache.Lock() + moduleCache.m[mod] = cache + moduleCache.Unlock() + mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart)) + funcdataAddr := uintptr(rt.IndexByte(cache, fstart)) + + // make pclnt table + pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs) + mod.pclntable = pclntab + + // assign addresses + mod.text = addr + mod.etext = addr + uintptr(size) + mod.minpc = addr + mod.maxpc = addr + uintptr(len(text)) + + // make pc header + mod.pcHeader = &pcHeader{ + magic: _Magic, + minLC: _MinLC, + ptrSize: _PtrSize, + nfunc: len(funcs), + nfiles: uint(len(cu)), + funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), + cuOffset: getOffsetOf(moduledata{}, "cutab"), + filetabOffset: getOffsetOf(moduledata{}, "filetab"), + pctabOffset: getOffsetOf(moduledata{}, "pctab"), + pclnOffset: getOffsetOf(moduledata{}, "pclntable"), + } + + // special case: gcdata and gcbss must be non-empty + mod.gcdata = uintptr(unsafe.Pointer(&emptyByte)) + mod.gcbss = uintptr(unsafe.Pointer(&emptyByte)) + + return } // makePctab generates pcdelta->valuedelta tables for functions, // and returns the table and the entry offset of every kind pcdata in the table. func makePctab(funcs []Func, addr uintptr, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) { - _funcs = make([]_func, len(funcs)) - - // Pctab offsets of 0 are considered invalid in the runtime. We respect - // that by just padding a single byte at the beginning of runtime.pctab, - // that way no real offsets can be zero.
- pctab = make([]byte, 1, 12*len(funcs)+1) - pcdataOffs = make([][]uint32, len(funcs)) - - for i, f := range funcs { - _f := &_funcs[i] - - var writer = func(pc *Pcdata) { - var ab []byte - var err error - if pc != nil { - ab, err = pc.MarshalBinary() - if err != nil { - panic(err) - } - pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) - } else { - ab = []byte{0} - pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) - } - pctab = append(pctab, ab...) - } - - if f.Pcsp != nil { - _f.pcsp = uint32(len(pctab)) - } - writer(f.Pcsp) - if f.Pcfile != nil { - _f.pcfile = uint32(len(pctab)) - } - writer(f.Pcfile) - if f.Pcline != nil { - _f.pcln = uint32(len(pctab)) - } - writer(f.Pcline) - writer(f.PcUnsafePoint) - writer(f.PcStackMapIndex) - writer(f.PcInlTreeIndex) - writer(f.PcArgLiveIndex) - - _f.entry = addr + uintptr(f.EntryOff) - _f.nameOff = nameOffset[i] - _f.args = f.ArgsSize - _f.deferreturn = f.DeferReturn - // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] - _f.npcdata = uint32(_N_PCDATA) - _f.cuOffset = cuOffset[i] - _f.funcID = f.ID - _f.nfuncdata = uint8(_N_FUNCDATA) - } - - return + _funcs = make([]_func, len(funcs)) + + // Pctab offsets of 0 are considered invalid in the runtime. We respect + // that by just padding a single byte at the beginning of runtime.pctab, + // that way no real offsets can be zero. + pctab = make([]byte, 1, 12*len(funcs)+1) + pcdataOffs = make([][]uint32, len(funcs)) + + for i, f := range funcs { + _f := &_funcs[i] + + var writer = func(pc *Pcdata) { + var ab []byte + var err error + if pc != nil { + ab, err = pc.MarshalBinary() + if err != nil { + panic(err) + } + pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) + } else { + ab = []byte{0} + pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) + } + pctab = append(pctab, ab...) 
+ } + + if f.Pcsp != nil { + _f.pcsp = uint32(len(pctab)) + } + writer(f.Pcsp) + if f.Pcfile != nil { + _f.pcfile = uint32(len(pctab)) + } + writer(f.Pcfile) + if f.Pcline != nil { + _f.pcln = uint32(len(pctab)) + } + writer(f.Pcline) + writer(f.PcUnsafePoint) + writer(f.PcStackMapIndex) + writer(f.PcInlTreeIndex) + writer(f.PcArgLiveIndex) + + _f.entry = addr + uintptr(f.EntryOff) + _f.nameOff = nameOffset[i] + _f.args = f.ArgsSize + _f.deferreturn = f.DeferReturn + // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] + _f.npcdata = uint32(_N_PCDATA) + _f.cuOffset = cuOffset[i] + _f.funcID = f.ID + _f.nfuncdata = uint8(_N_FUNCDATA) + } + + return } -func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {} \ No newline at end of file +func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) { +} diff --git a/vendor/github.com/bytedance/sonic/loader/funcdata_go118.go b/vendor/github.com/bytedance/sonic/loader/funcdata_go118.go index a2bac857a..c0547d3c4 100644 --- a/vendor/github.com/bytedance/sonic/loader/funcdata_go118.go +++ b/vendor/github.com/bytedance/sonic/loader/funcdata_go118.go @@ -1,4 +1,5 @@ // go:build go1.18 && !go1.20 +//go:build go1.18 && !go1.20 // +build go1.18,!go1.20 /* @@ -20,141 +21,141 @@ package loader import ( - `encoding` - `os` - `unsafe` + "encoding" + "os" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) const ( - _Magic uint32 = 0xfffffff0 + _Magic uint32 = 0xfffffff0 ) type pcHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type moduledata struct { - pcHeader *pcHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []funcTab - findfunctab uintptr - minpc, maxpc uintptr // first func address, last func address + last func size + pcHeader *pcHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []funcTab + findfunctab uintptr + minpc, maxpc uintptr // first func address, last func address + last func size - text, etext 
uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - rodata uintptr - gofunc uintptr // go.func.* is actual funcinfo object in image + text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr + rodata uintptr + gofunc uintptr // go.func.* is actual funcinfo object in image - textsectmap []textSection // see runtime/symtab.go: textAddr() - typelinks []int32 // offsets from types - itablinks []*rt.GoItab + textsectmap []textSection // see runtime/symtab.go: textAddr() + typelinks []int32 // offsets from types + itablinks []*rt.GoItab - ptab []ptabEntry + ptab []ptabEntry - pluginpath string - pkghashes []modulehash + pluginpath string + pkghashes []modulehash - modulename string - modulehashes []modulehash + modulename string + modulehashes []modulehash - hasmain uint8 // 1 if module contains the main function, 0 otherwise + hasmain uint8 // 1 if module contains the main function, 0 otherwise - gcdatamask, gcbssmask bitVector + gcdatamask, gcbssmask bitVector - typemap map[int32]*rt.GoType // offset to *_rtype in previous module + typemap map[int32]*rt.GoType // offset to *_rtype in previous module - bad bool // module failed to load and should be ignored + bad bool // module failed to load and should be ignored - next *moduledata + next *moduledata } type _func struct { - entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart - nameOff int32 // function name, as index into moduledata.funcnametab. - - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - funcID uint8 // set for certain special runtime functions - flag uint8 - _ [1]byte // pad - nfuncdata uint8 // - - // The end of the struct is followed immediately by two variable-length - // arrays that reference the pcdata and funcdata locations for this - // function. - - // pcdata contains the offset into moduledata.pctab for the start of - // that index's table. e.g., - // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of - // the unsafe point table. - // - // An offset of 0 indicates that there is no table. - // - // pcdata [npcdata]uint32 - - // funcdata contains the offset past moduledata.gofunc which contains a - // pointer to that index's funcdata. e.g., - // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is - // the argument pointer map. - // - // An offset of ^uint32(0) indicates that there is no entry. - // - // funcdata [nfuncdata]uint32 + entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart + nameOff int32 // function name, as index into moduledata.funcnametab. + + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
+ + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID uint8 // set for certain special runtime functions + flag uint8 + _ [1]byte // pad + nfuncdata uint8 // + + // The end of the struct is followed immediately by two variable-length + // arrays that reference the pcdata and funcdata locations for this + // function. + + // pcdata contains the offset into moduledata.pctab for the start of + // that index's table. e.g., + // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of + // the unsafe point table. + // + // An offset of 0 indicates that there is no table. + // + // pcdata [npcdata]uint32 + + // funcdata contains the offset past moduledata.gofunc which contains a + // pointer to that index's funcdata. e.g., + // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is + // the argument pointer map. + // + // An offset of ^uint32(0) indicates that there is no entry. + // + // funcdata [nfuncdata]uint32 } type funcTab struct { - entry uint32 - funcoff uint32 + entry uint32 + funcoff uint32 } type bitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type ptabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type textSection struct { - vaddr uintptr // prelinked section vaddr - end uintptr // vaddr + section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + end uintptr // vaddr + section length + baseaddr uintptr // relocated section address } type modulehash struct { - modulename string - linktimehash string - runtimehash *string + modulename string + linktimehash string + runtimehash *string } // findfuncbucket is an array of these structures. @@ -166,376 +167,384 @@ type modulehash struct { // index to find the target function. // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead. type findfuncbucket struct { - idx uint32 - _SUBBUCKETS [16]byte + idx uint32 + _SUBBUCKETS [16]byte } -// func name table format: -// nameOff[0] -> namePartA namePartB namePartC \x00 -// nameOff[1] -> namePartA namePartB namePartC \x00 -// ... +// func name table format: +// +// nameOff[0] -> namePartA namePartB namePartC \x00 +// nameOff[1] -> namePartA namePartB namePartC \x00 +// ... func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) { - offs = make([]int32, len(funcs)) - offset := 0 + offs = make([]int32, len(funcs)) + offset := 0 - for i, f := range funcs { - offs[i] = int32(offset) + for i, f := range funcs { + offs[i] = int32(offset) - a, b, c := funcNameParts(f.Name) - tab = append(tab, a...) - tab = append(tab, b...) - tab = append(tab, c...) - tab = append(tab, 0) - offset += len(a) + len(b) + len(c) + 1 - } + a, b, c := funcNameParts(f.Name) + tab = append(tab, a...) + tab = append(tab, b...) + tab = append(tab, c...) + tab = append(tab, 0) + offset += len(a) + len(b) + len(c) + 1 + } - return + return } type compilationUnit struct { - fileNames []string + fileNames []string } // CU table format: -// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] -// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] -// ... +// +// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] +// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... 
filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] +// ... // // file name table format: -// filetabOffset[0] -> CUs[0].fileNames[0] \x00 -// ... -// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 -// ... -// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 +// +// filetabOffset[0] -> CUs[0].fileNames[0] \x00 +// ... +// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 +// ... +// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) { - cuOffsets = make([]uint32, len(cus)) - cuOffset := 0 - fileOffset := 0 + cuOffsets = make([]uint32, len(cus)) + cuOffset := 0 + fileOffset := 0 - for i, cu := range cus { - cuOffsets[i] = uint32(cuOffset) + for i, cu := range cus { + cuOffsets[i] = uint32(cuOffset) - for _, name := range cu.fileNames { - cutab = append(cutab, uint32(fileOffset)) + for _, name := range cu.fileNames { + cutab = append(cutab, uint32(fileOffset)) - fileOffset += len(name) + 1 - filetab = append(filetab, name...) - filetab = append(filetab, 0) - } + fileOffset += len(name) + 1 + filetab = append(filetab, name...) + filetab = append(filetab, 0) + } - cuOffset += len(cu.fileNames) - } + cuOffset += len(cu.fileNames) + } - return + return } func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) { - fstart = len(*out) - *out = append(*out, byte(0)) - offs := uint32(1) - - funcdataOffs = make([][]uint32, len(funcs)) - for i, f := range funcs { - - var writer = func(fd encoding.BinaryMarshaler) { - var ab []byte - var err error - if fd != nil { - ab, err = fd.MarshalBinary() - if err != nil { - panic(err) - } - funcdataOffs[i] = append(funcdataOffs[i], offs) - } else { - ab = []byte{0} - funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) - } - *out = append(*out, ab...) - offs += uint32(len(ab)) - } - - writer(f.ArgsPointerMaps) - writer(f.LocalsPointerMaps) - writer(f.StackObjects) - writer(f.InlTree) - writer(f.OpenCodedDeferInfo) - writer(f.ArgInfo) - writer(f.ArgLiveInfo) - writer(f.WrapInfo) - } - return + fstart = len(*out) + *out = append(*out, byte(0)) + offs := uint32(1) + + funcdataOffs = make([][]uint32, len(funcs)) + for i, f := range funcs { + + var writer = func(fd encoding.BinaryMarshaler) { + var ab []byte + var err error + if fd != nil { + ab, err = fd.MarshalBinary() + if err != nil { + panic(err) + } + funcdataOffs[i] = append(funcdataOffs[i], offs) + } else { + ab = []byte{0} + funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) + } + *out = append(*out, ab...) + offs += uint32(len(ab)) + } + + writer(f.ArgsPointerMaps) + writer(f.LocalsPointerMaps) + writer(f.StackObjects) + writer(f.InlTree) + writer(f.OpenCodedDeferInfo) + writer(f.ArgInfo) + writer(f.ArgLiveInfo) + writer(f.WrapInfo) + } + return } func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. 
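The cutab/filetab pair built above is, at bottom, one NUL-separated string pool plus a parallel table of byte offsets into it; the funcname table uses the same shape. A minimal standalone sketch of that layout (buildStringTab is a hypothetical helper, not part of the loader API):

package main

import "fmt"

// buildStringTab packs names into one NUL-separated pool and records the
// byte offset at which each name starts, mirroring filetab/funcnametab.
func buildStringTab(names []string) (tab []byte, offs []uint32) {
	for _, n := range names {
		offs = append(offs, uint32(len(tab)))
		tab = append(tab, n...)
		tab = append(tab, 0) // NUL terminator, as in the runtime's tables
	}
	return
}

func main() {
	tab, offs := buildStringTab([]string{"a.go", "b.go"})
	fmt.Println(offs, len(tab)) // [0 5] 10
}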
- var size int64 = int64(len(funcs)*2*4 + 4) - var startLocations = make([]uint32, len(funcs)) - for i, f := range funcs { - size = rnd(size, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(size) - size += int64(uint8(_FUNC_SIZE)+f.nfuncdata*4+uint8(f.npcdata)*4) - } - - ftab = make([]funcTab, 0, len(funcs)+1) - - // write a map of pc->func info offsets - for i, f := range funcs { - ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])}) - } - - // Final entry of table is just end pc offset. - lastFunc := funcs[len(funcs)-1] - ftab = append(ftab, funcTab{uint32(lastFunc.entryOff + lastFuncSize), 0}) - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + var size int64 = int64(len(funcs)*2*4 + 4) + var startLocations = make([]uint32, len(funcs)) + for i, f := range funcs { + size = rnd(size, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(size) + size += int64(uint8(_FUNC_SIZE) + f.nfuncdata*4 + uint8(f.npcdata)*4) + } + + ftab = make([]funcTab, 0, len(funcs)+1) + + // write a map of pc->func info offsets + for i, f := range funcs { + ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])}) + } + + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + ftab = append(ftab, funcTab{uint32(lastFunc.entryOff + lastFuncSize), 0}) + + return } // Pcln table format: [...]funcTab + [...]_Func func makePclntable(funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataOffs [][]uint32) (pclntab []byte) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. - var size int64 = int64(len(funcs)*2*4 + 4) - var startLocations = make([]uint32, len(funcs)) - for i := range funcs { - size = rnd(size, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(size) - size += int64(int(_FUNC_SIZE)+len(funcdataOffs[i])*4+len(pcdataOffs[i])*4) - } - - pclntab = make([]byte, size, size) - - // write a map of pc->func info offsets - offs := 0 - for i, f := range funcs { - byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff)) - byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i])) - offs += 8 - } - // Final entry of table is just end pc offset. - lastFunc := funcs[len(funcs)-1] - byteOrder.PutUint32(pclntab[offs:offs+4], uint32(lastFunc.entryOff+lastFuncSize)) - - // write func info table - for i, f := range funcs { - off := startLocations[i] - - // write _func structure to pclntab - fb := rt.BytesFrom(unsafe.Pointer(&f), int(_FUNC_SIZE), int(_FUNC_SIZE)) - copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb) - off += uint32(_FUNC_SIZE) - - // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 - for j := 3; j < len(pcdataOffs[i]); j++ { - byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) - off += 4 - } - - // funcdata refs as offsets from gofunc - for _, funcdata := range funcdataOffs[i] { - byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata)) - off += 4 - } - - } - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. 
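makeFtab above writes one {entry, funcoff} pair per function, aligning each _func record up to the pointer size, and then appends a sentinel pair whose entry is the end PC of the last function. A toy standalone version of that bookkeeping; the 8-byte alignment and the 40-byte record size are illustrative assumptions, not the loader's real constants:

package main

import "fmt"

type funcTab struct{ entry, funcoff uint32 }

// rnd rounds x up to a multiple of a (a must be a power of two).
func rnd(x, a int64) int64 { return (x + a - 1) &^ (a - 1) }

func main() {
	entries := []uint32{0, 0x40, 0xa0} // per-function entry PC offsets
	lastSize := uint32(0x30)           // text size of the last function
	const recSize = 40                 // pretend fixed _func record size

	// header: len(entries) pairs of 4-byte words plus one 4-byte end marker
	size := int64(len(entries)*2*4 + 4)
	ftab := make([]funcTab, 0, len(entries)+1)
	for _, e := range entries {
		size = rnd(size, 8) // align each record like the loader aligns _func
		ftab = append(ftab, funcTab{entry: e, funcoff: uint32(size)})
		size += recSize
	}
	// sentinel: end PC of the last function, with no func offset
	ftab = append(ftab, funcTab{entry: entries[len(entries)-1] + lastSize})
	fmt.Println(ftab) // [{0 32} {64 72} {160 112} {208 0}]
}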
+ var size int64 = int64(len(funcs)*2*4 + 4) + var startLocations = make([]uint32, len(funcs)) + for i := range funcs { + size = rnd(size, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(size) + size += int64(int(_FUNC_SIZE) + len(funcdataOffs[i])*4 + len(pcdataOffs[i])*4) + } + + pclntab = make([]byte, size, size) + + // write a map of pc->func info offsets + offs := 0 + for i, f := range funcs { + byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff)) + byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i])) + offs += 8 + } + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + byteOrder.PutUint32(pclntab[offs:offs+4], uint32(lastFunc.entryOff+lastFuncSize)) + + // write func info table + for i, f := range funcs { + off := startLocations[i] + + // write _func structure to pclntab + fb := rt.BytesFrom(unsafe.Pointer(&f), int(_FUNC_SIZE), int(_FUNC_SIZE)) + copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb) + off += uint32(_FUNC_SIZE) + + // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 + for j := 3; j < len(pcdataOffs[i]); j++ { + byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) + off += 4 + } + + // funcdata refs as offsets from gofunc + for _, funcdata := range funcdataOffs[i] { + byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata)) + off += 4 + } + + } + + return } -// findfunc table used to map pc to belonging func, +// findfunc table used to map pc to belonging func, // returns the index in the func table. // // All text section are divided into buckets sized _BUCKETSIZE(4K): -// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), -// and it has a base idx to plus the offset stored in jth subbucket. +// +// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), +// and it has a base idx to plus the offset stored in jth subbucket. +// // see findfunc() in runtime/symtab.go func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) { - start = len(*out) - - max := ftab[len(ftab)-1].entry - min := ftab[0].entry - nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE - n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE - - tab := make([]findfuncbucket, 0, nbuckets) - var s, e = 0, 0 - for i := 0; i<int(nbuckets); i++ { - var pc = min + uint32((i+1)*_BUCKETSIZE) - // find the end func of the bucket - for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {} - // store the start func of the bucket - var fb = findfuncbucket{idx: uint32(s)} - - for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ { - pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE) - var ss = s - // find the end func of the subbucket - for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {} - // store the start func of the subbucket - fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) - s = ss - } - s = e - tab = append(tab, fb) - } - - // write findfuncbucket - if len(tab) > 0 { - size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab) - *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) 
- } - return + start = len(*out) + + max := ftab[len(ftab)-1].entry + min := ftab[0].entry + nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE + n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE + + tab := make([]findfuncbucket, 0, nbuckets) + var s, e = 0, 0 + for i := 0; i < int(nbuckets); i++ { + var pc = min + uint32((i+1)*_BUCKETSIZE) + // find the end func of the bucket + for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ { + } + // store the start func of the bucket + var fb = findfuncbucket{idx: uint32(s)} + + for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ { + pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE) + var ss = s + // find the end func of the subbucket + for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ { + } + // store the start func of the subbucket + fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) + s = ss + } + s = e + tab = append(tab, fb) + } + + // write findfuncbucket + if len(tab) > 0 { + size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab) + *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) + } + return } func makeModuledata(name string, filenames []string, funcs []Func, text []byte) (mod *moduledata) { - mod = new(moduledata) - mod.modulename = name - - // make filename table - cu := make([]string, 0, len(filenames)) - for _, f := range filenames { - cu = append(cu, f) - } - cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) - mod.cutab = cutab - mod.filetab = filetab - - // make funcname table - funcnametab, nameOffs := makeFuncnameTab(funcs) - mod.funcnametab = funcnametab - - // make pcdata table - // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata - pctab, pcdataOffs, _funcs := makePctab(funcs, cuOffs, nameOffs) - mod.pctab = pctab - - // write func data - // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata - // TODO: estimate accurate capacity - cache := make([]byte, 0, len(funcs)*int(_PtrSize)) - fstart, funcdataOffs := writeFuncdata(&cache, funcs) - - // make pc->func (binary search) func table - lastFuncsize := funcs[len(funcs)-1].TextSize - ftab := makeFtab(_funcs, lastFuncsize) - mod.ftab = ftab - - // write pc->func (modmap) findfunc table - ffstart := writeFindfunctab(&cache, ftab) - - // make pclnt table - pclntab := makePclntable(_funcs, lastFuncsize, pcdataOffs, funcdataOffs) - mod.pclntable = pclntab - - // mmap() text and funcdata segements - p := os.Getpagesize() - size := int(rnd(int64(len(text)), int64(p))) - addr := mmap(size) - // copy the machine code - s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) - copy(s, text) - // make it executable - mprotect(addr, size) - - // assign addresses - mod.text = addr - mod.etext = addr + uintptr(size) - mod.minpc = addr - mod.maxpc = addr + uintptr(len(text)) - - // cache funcdata and findfuncbucket - moduleCache.Lock() - moduleCache.m[mod] = cache - moduleCache.Unlock() - mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart])) - mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart])) - - // make pc header - mod.pcHeader = &pcHeader { - magic : _Magic, - minLC : _MinLC, - ptrSize : _PtrSize, - nfunc : len(funcs), - nfiles: uint(len(cu)), - textStart: mod.text, - funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), - cuOffset: getOffsetOf(moduledata{}, "cutab"), - filetabOffset: getOffsetOf(moduledata{}, "filetab"), - pctabOffset: getOffsetOf(moduledata{}, "pctab"), - pclnOffset: getOffsetOf(moduledata{}, "pclntable"), - } - - // 
sepecial case: gcdata and gcbss must by non-empty
- mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
- mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
-
- return
+ mod = new(moduledata)
+ mod.modulename = name
+
+ // make filename table
+ cu := make([]string, 0, len(filenames))
+ for _, f := range filenames {
+ cu = append(cu, f)
+ }
+ cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}})
+ mod.cutab = cutab
+ mod.filetab = filetab
+
+ // make funcname table
+ funcnametab, nameOffs := makeFuncnameTab(funcs)
+ mod.funcnametab = funcnametab
+
+ // make pcdata table
+ // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
+ pctab, pcdataOffs, _funcs := makePctab(funcs, cuOffs, nameOffs)
+ mod.pctab = pctab
+
+ // write func data
+ // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
+ // TODO: estimate accurate capacity
+ cache := make([]byte, 0, len(funcs)*int(_PtrSize))
+ fstart, funcdataOffs := writeFuncdata(&cache, funcs)
+
+ // make pc->func (binary search) func table
+ lastFuncsize := funcs[len(funcs)-1].TextSize
+ ftab := makeFtab(_funcs, lastFuncsize)
+ mod.ftab = ftab
+
+ // write pc->func (modmap) findfunc table
+ ffstart := writeFindfunctab(&cache, ftab)
+
+ // make pclnt table
+ pclntab := makePclntable(_funcs, lastFuncsize, pcdataOffs, funcdataOffs)
+ mod.pclntable = pclntab
+
+ // mmap() text and funcdata segments
+ p := os.Getpagesize()
+ size := int(rnd(int64(len(text)), int64(p)))
+ addr := mmap(size)
+ // copy the machine code
+ s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
+ copy(s, text)
+ // make it executable
+ mprotect(addr, size)
+
+ // assign addresses
+ mod.text = addr
+ mod.etext = addr + uintptr(size)
+ mod.minpc = addr
+ mod.maxpc = addr + uintptr(len(text))
+
+ // cache funcdata and findfuncbucket
+ moduleCache.Lock()
+ moduleCache.m[mod] = cache
+ moduleCache.Unlock()
+ mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart]))
+ mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart]))
+
+ // make pc header
+ mod.pcHeader = &pcHeader{
+ magic: _Magic,
+ minLC: _MinLC,
+ ptrSize: _PtrSize,
+ nfunc: len(funcs),
+ nfiles: uint(len(cu)),
+ textStart: mod.text,
+ funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
+ cuOffset: getOffsetOf(moduledata{}, "cutab"),
+ filetabOffset: getOffsetOf(moduledata{}, "filetab"),
+ pctabOffset: getOffsetOf(moduledata{}, "pctab"),
+ pclnOffset: getOffsetOf(moduledata{}, "pclntable"),
+ }
+
+ // special case: gcdata and gcbss must be non-empty
+ mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
+ mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
+
+ return
 }

// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind pcdata in the table.
func makePctab(funcs []Func, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
- _funcs = make([]_func, len(funcs))
-
- // Pctab offsets of 0 are considered invalid in the runtime. We respect
- // that by just padding a single byte at the beginning of runtime.pctab,
- // that way no real offsets can be zero.
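The getOffsetOf(moduledata{}, ...) calls above record, inside pcHeader, where each table lives as a byte offset from the start of moduledata. One plausible way to compute such offsets is via reflect; a sketch under that assumption (offsetOf is hypothetical and not necessarily how the loader's getOffsetOf is implemented):

package main

import (
	"fmt"
	"reflect"
)

// moduledataLike mimics the first few fields of the loader's moduledata.
type moduledataLike struct {
	pcHeader    uintptr
	funcnametab []byte
	cutab       []uint32
}

// offsetOf returns the byte offset of a named field within a struct value.
func offsetOf(v interface{}, field string) uintptr {
	f, ok := reflect.TypeOf(v).FieldByName(field)
	if !ok {
		panic("no such field: " + field)
	}
	return f.Offset
}

func main() {
	// 8 on 64-bit targets: funcnametab sits right after the pcHeader word
	fmt.Println(offsetOf(moduledataLike{}, "funcnametab"))
}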
- pctab = make([]byte, 1, 12*len(funcs)+1) - pcdataOffs = make([][]uint32, len(funcs)) - - for i, f := range funcs { - _f := &_funcs[i] - - var writer = func(pc *Pcdata) { - var ab []byte - var err error - if pc != nil { - ab, err = pc.MarshalBinary() - if err != nil { - panic(err) - } - pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) - } else { - ab = []byte{0} - pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) - } - pctab = append(pctab, ab...) - } - - if f.Pcsp != nil { - _f.pcsp = uint32(len(pctab)) - } - writer(f.Pcsp) - if f.Pcfile != nil { - _f.pcfile = uint32(len(pctab)) - } - writer(f.Pcfile) - if f.Pcline != nil { - _f.pcln = uint32(len(pctab)) - } - writer(f.Pcline) - writer(f.PcUnsafePoint) - writer(f.PcStackMapIndex) - writer(f.PcInlTreeIndex) - writer(f.PcArgLiveIndex) - - _f.entryOff = f.EntryOff - _f.nameOff = nameOffset[i] - _f.args = f.ArgsSize - _f.deferreturn = f.DeferReturn - // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] - _f.npcdata = uint32(_N_PCDATA) - _f.cuOffset = cuOffset[i] - _f.funcID = f.ID - _f.flag = f.Flag - _f.nfuncdata = uint8(_N_FUNCDATA) - } - - return + _funcs = make([]_func, len(funcs)) + + // Pctab offsets of 0 are considered invalid in the runtime. We respect + // that by just padding a single byte at the beginning of runtime.pctab, + // that way no real offsets can be zero. + pctab = make([]byte, 1, 12*len(funcs)+1) + pcdataOffs = make([][]uint32, len(funcs)) + + for i, f := range funcs { + _f := &_funcs[i] + + var writer = func(pc *Pcdata) { + var ab []byte + var err error + if pc != nil { + ab, err = pc.MarshalBinary() + if err != nil { + panic(err) + } + pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) + } else { + ab = []byte{0} + pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) + } + pctab = append(pctab, ab...) 
+ } + + if f.Pcsp != nil { + _f.pcsp = uint32(len(pctab)) + } + writer(f.Pcsp) + if f.Pcfile != nil { + _f.pcfile = uint32(len(pctab)) + } + writer(f.Pcfile) + if f.Pcline != nil { + _f.pcln = uint32(len(pctab)) + } + writer(f.Pcline) + writer(f.PcUnsafePoint) + writer(f.PcStackMapIndex) + writer(f.PcInlTreeIndex) + writer(f.PcArgLiveIndex) + + _f.entryOff = f.EntryOff + _f.nameOff = nameOffset[i] + _f.args = f.ArgsSize + _f.deferreturn = f.DeferReturn + // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] + _f.npcdata = uint32(_N_PCDATA) + _f.cuOffset = cuOffset[i] + _f.funcID = f.ID + _f.flag = f.Flag + _f.nfuncdata = uint8(_N_FUNCDATA) + } + + return } -func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {} \ No newline at end of file +func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) { +} diff --git a/vendor/github.com/bytedance/sonic/loader/funcdata_go120.go b/vendor/github.com/bytedance/sonic/loader/funcdata_go120.go index 906fe375d..6436a25d5 100644 --- a/vendor/github.com/bytedance/sonic/loader/funcdata_go120.go +++ b/vendor/github.com/bytedance/sonic/loader/funcdata_go120.go @@ -20,145 +20,145 @@ package loader import ( - `encoding` - `os` - `unsafe` + "encoding" + "os" + "unsafe" - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/rt" ) const ( - _Magic uint32 = 0xFFFFFFF1 + _Magic uint32 = 0xFFFFFFF1 ) type moduledata struct { - pcHeader *pcHeader - funcnametab []byte - cutab []uint32 - filetab []byte - pctab []byte - pclntable []byte - ftab []funcTab - findfunctab uintptr - minpc, maxpc uintptr // first func address, last func address + last func size + pcHeader *pcHeader + funcnametab []byte + cutab []uint32 + filetab []byte + pctab []byte + pclntable []byte + ftab []funcTab + findfunctab uintptr + minpc, maxpc uintptr // first func address, last func address + last func size - text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC - noptrdata, enoptrdata uintptr - data, edata uintptr - bss, ebss uintptr - noptrbss, enoptrbss uintptr - covctrs, ecovctrs uintptr - end, gcdata, gcbss uintptr - types, etypes uintptr - rodata uintptr + text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC + noptrdata, enoptrdata uintptr + data, edata uintptr + bss, ebss uintptr + noptrbss, enoptrbss uintptr + covctrs, ecovctrs uintptr + end, gcdata, gcbss uintptr + types, etypes uintptr + rodata uintptr - // TODO: generate funcinfo object to memory - gofunc uintptr // go.func.* is actual funcinfo object in image + // TODO: generate funcinfo object to memory + gofunc uintptr // go.func.* is actual funcinfo object in image - textsectmap []textSection // see runtime/symtab.go: textAddr() - typelinks []int32 // offsets from types - itablinks []*rt.GoItab + textsectmap []textSection // see runtime/symtab.go: textAddr() + typelinks []int32 // offsets from types + itablinks []*rt.GoItab - ptab []ptabEntry + ptab []ptabEntry - pluginpath string - pkghashes []modulehash + pluginpath string + pkghashes []modulehash - modulename string - modulehashes []modulehash + modulename string + modulehashes []modulehash - hasmain uint8 // 1 if module contains the main function, 0 otherwise + hasmain uint8 // 1 if module contains the main function, 0 otherwise - gcdatamask, gcbssmask bitVector + gcdatamask, gcbssmask bitVector - typemap 
map[int32]*rt.GoType // offset to *_rtype in previous module + typemap map[int32]*rt.GoType // offset to *_rtype in previous module - bad bool // module failed to load and should be ignored + bad bool // module failed to load and should be ignored - next *moduledata + next *moduledata } type _func struct { - entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart - nameOff int32 // function name, as index into moduledata.funcnametab. - - args int32 // in/out args size - deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. - - pcsp uint32 - pcfile uint32 - pcln uint32 - npcdata uint32 - cuOffset uint32 // runtime.cutab offset of this function's CU - startLine int32 // line number of start of function (func keyword/TEXT directive) - funcID uint8 // set for certain special runtime functions - flag uint8 - _ [1]byte // pad - nfuncdata uint8 // - - // The end of the struct is followed immediately by two variable-length - // arrays that reference the pcdata and funcdata locations for this - // function. - - // pcdata contains the offset into moduledata.pctab for the start of - // that index's table. e.g., - // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of - // the unsafe point table. - // - // An offset of 0 indicates that there is no table. - // - // pcdata [npcdata]uint32 - - // funcdata contains the offset past moduledata.gofunc which contains a - // pointer to that index's funcdata. e.g., - // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is - // the argument pointer map. - // - // An offset of ^uint32(0) indicates that there is no entry. - // - // funcdata [nfuncdata]uint32 + entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart + nameOff int32 // function name, as index into moduledata.funcnametab. + + args int32 // in/out args size + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. + + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 + cuOffset uint32 // runtime.cutab offset of this function's CU + startLine int32 // line number of start of function (func keyword/TEXT directive) + funcID uint8 // set for certain special runtime functions + flag uint8 + _ [1]byte // pad + nfuncdata uint8 // + + // The end of the struct is followed immediately by two variable-length + // arrays that reference the pcdata and funcdata locations for this + // function. + + // pcdata contains the offset into moduledata.pctab for the start of + // that index's table. e.g., + // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of + // the unsafe point table. + // + // An offset of 0 indicates that there is no table. + // + // pcdata [npcdata]uint32 + + // funcdata contains the offset past moduledata.gofunc which contains a + // pointer to that index's funcdata. e.g., + // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is + // the argument pointer map. + // + // An offset of ^uint32(0) indicates that there is no entry. 
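Note the asymmetry between the two sentinels spelled out above: a pcdata offset of 0 means "no table", while a funcdata offset of ^uint32(0) means "no entry". A simplified standalone illustration of resolving funcdata as gofunc plus offset (resolve is a hypothetical helper, and the real runtime reads a pointer stored at that address rather than slicing bytes):

package main

import "fmt"

const invalidFuncdata = ^uint32(0)

// resolve returns the bytes a funcdata offset points at, or nil when the
// sentinel marks the entry as absent.
func resolve(gofunc []byte, off uint32) []byte {
	if off == invalidFuncdata {
		return nil
	}
	return gofunc[off:]
}

func main() {
	gofunc := []byte{0, 0xAA, 0xBB} // leading pad byte plus two blobs
	fmt.Println(resolve(gofunc, 1))               // [170 187]
	fmt.Println(resolve(gofunc, invalidFuncdata)) // []
}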
+ // + // funcdata [nfuncdata]uint32 } type funcTab struct { - entry uint32 - funcoff uint32 + entry uint32 + funcoff uint32 } type pcHeader struct { - magic uint32 // 0xFFFFFFF0 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text - funcnameOffset uintptr // offset to the funcnametab variable from pcHeader - cuOffset uintptr // offset to the cutab variable from pcHeader - filetabOffset uintptr // offset to the filetab variable from pcHeader - pctabOffset uintptr // offset to the pctab variable from pcHeader - pclnOffset uintptr // offset to the pclntab variable from pcHeader + magic uint32 // 0xFFFFFFF0 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab variable from pcHeader + pclnOffset uintptr // offset to the pclntab variable from pcHeader } type bitVector struct { - n int32 // # of bits - bytedata *uint8 + n int32 // # of bits + bytedata *uint8 } type ptabEntry struct { - name int32 - typ int32 + name int32 + typ int32 } type textSection struct { - vaddr uintptr // prelinked section vaddr - end uintptr // vaddr + section length - baseaddr uintptr // relocated section address + vaddr uintptr // prelinked section vaddr + end uintptr // vaddr + section length + baseaddr uintptr // relocated section address } type modulehash struct { - modulename string - linktimehash string - runtimehash *string + modulename string + linktimehash string + runtimehash *string } // findfuncbucket is an array of these structures. @@ -170,376 +170,384 @@ type modulehash struct { // index to find the target function. // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead. type findfuncbucket struct { - idx uint32 - _SUBBUCKETS [16]byte + idx uint32 + _SUBBUCKETS [16]byte } -// func name table format: -// nameOff[0] -> namePartA namePartB namePartC \x00 -// nameOff[1] -> namePartA namePartB namePartC \x00 -// ... +// func name table format: +// +// nameOff[0] -> namePartA namePartB namePartC \x00 +// nameOff[1] -> namePartA namePartB namePartC \x00 +// ... func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) { - offs = make([]int32, len(funcs)) - offset := 0 + offs = make([]int32, len(funcs)) + offset := 0 - for i, f := range funcs { - offs[i] = int32(offset) + for i, f := range funcs { + offs[i] = int32(offset) - a, b, c := funcNameParts(f.Name) - tab = append(tab, a...) - tab = append(tab, b...) - tab = append(tab, c...) - tab = append(tab, 0) - offset += len(a) + len(b) + len(c) + 1 - } + a, b, c := funcNameParts(f.Name) + tab = append(tab, a...) + tab = append(tab, b...) + tab = append(tab, c...) 
+ tab = append(tab, 0) + offset += len(a) + len(b) + len(c) + 1 + } - return + return } type compilationUnit struct { - fileNames []string + fileNames []string } // CU table format: -// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] -// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] -// ... +// +// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1] +// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1] +// ... // // file name table format: -// filetabOffset[0] -> CUs[0].fileNames[0] \x00 -// ... -// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 -// ... -// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 +// +// filetabOffset[0] -> CUs[0].fileNames[0] \x00 +// ... +// filetabOffset[len(CUs[0]-1)] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00 +// ... +// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00 func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) { - cuOffsets = make([]uint32, len(cus)) - cuOffset := 0 - fileOffset := 0 + cuOffsets = make([]uint32, len(cus)) + cuOffset := 0 + fileOffset := 0 - for i, cu := range cus { - cuOffsets[i] = uint32(cuOffset) + for i, cu := range cus { + cuOffsets[i] = uint32(cuOffset) - for _, name := range cu.fileNames { - cutab = append(cutab, uint32(fileOffset)) + for _, name := range cu.fileNames { + cutab = append(cutab, uint32(fileOffset)) - fileOffset += len(name) + 1 - filetab = append(filetab, name...) - filetab = append(filetab, 0) - } + fileOffset += len(name) + 1 + filetab = append(filetab, name...) + filetab = append(filetab, 0) + } - cuOffset += len(cu.fileNames) - } + cuOffset += len(cu.fileNames) + } - return + return } func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) { - fstart = len(*out) - *out = append(*out, byte(0)) - offs := uint32(1) - - funcdataOffs = make([][]uint32, len(funcs)) - for i, f := range funcs { - - var writer = func(fd encoding.BinaryMarshaler) { - var ab []byte - var err error - if fd != nil { - ab, err = fd.MarshalBinary() - if err != nil { - panic(err) - } - funcdataOffs[i] = append(funcdataOffs[i], offs) - } else { - ab = []byte{0} - funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) - } - *out = append(*out, ab...) - offs += uint32(len(ab)) - } - - writer(f.ArgsPointerMaps) - writer(f.LocalsPointerMaps) - writer(f.StackObjects) - writer(f.InlTree) - writer(f.OpenCodedDeferInfo) - writer(f.ArgInfo) - writer(f.ArgLiveInfo) - writer(f.WrapInfo) - } - return + fstart = len(*out) + *out = append(*out, byte(0)) + offs := uint32(1) + + funcdataOffs = make([][]uint32, len(funcs)) + for i, f := range funcs { + + var writer = func(fd encoding.BinaryMarshaler) { + var ab []byte + var err error + if fd != nil { + ab, err = fd.MarshalBinary() + if err != nil { + panic(err) + } + funcdataOffs[i] = append(funcdataOffs[i], offs) + } else { + ab = []byte{0} + funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET) + } + *out = append(*out, ab...) 
+ offs += uint32(len(ab)) + } + + writer(f.ArgsPointerMaps) + writer(f.LocalsPointerMaps) + writer(f.StackObjects) + writer(f.InlTree) + writer(f.OpenCodedDeferInfo) + writer(f.ArgInfo) + writer(f.ArgLiveInfo) + writer(f.WrapInfo) + } + return } func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. - var size int64 = int64(len(funcs)*2*4 + 4) - var startLocations = make([]uint32, len(funcs)) - for i, f := range funcs { - size = rnd(size, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(size) - size += int64(uint8(_FUNC_SIZE)+f.nfuncdata*4+uint8(f.npcdata)*4) - } - - ftab = make([]funcTab, 0, len(funcs)+1) - - // write a map of pc->func info offsets - for i, f := range funcs { - ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])}) - } - - // Final entry of table is just end pc offset. - lastFunc := funcs[len(funcs)-1] - ftab = append(ftab, funcTab{uint32(lastFunc.entryOff + lastFuncSize), 0}) - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + var size int64 = int64(len(funcs)*2*4 + 4) + var startLocations = make([]uint32, len(funcs)) + for i, f := range funcs { + size = rnd(size, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(size) + size += int64(uint8(_FUNC_SIZE) + f.nfuncdata*4 + uint8(f.npcdata)*4) + } + + ftab = make([]funcTab, 0, len(funcs)+1) + + // write a map of pc->func info offsets + for i, f := range funcs { + ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])}) + } + + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + ftab = append(ftab, funcTab{uint32(lastFunc.entryOff + lastFuncSize), 0}) + + return } // Pcln table format: [...]funcTab + [...]_Func func makePclntable(funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataOffs [][]uint32) (pclntab []byte) { - // Allocate space for the pc->func table. This structure consists of a pc offset - // and an offset to the func structure. After that, we have a single pc - // value that marks the end of the last function in the binary. - var size int64 = int64(len(funcs)*2*4 + 4) - var startLocations = make([]uint32, len(funcs)) - for i := range funcs { - size = rnd(size, int64(_PtrSize)) - //writePCToFunc - startLocations[i] = uint32(size) - size += int64(int(_FUNC_SIZE)+len(funcdataOffs[i])*4+len(pcdataOffs[i])*4) - } - - pclntab = make([]byte, size, size) - - // write a map of pc->func info offsets - offs := 0 - for i, f := range funcs { - byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff)) - byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i])) - offs += 8 - } - // Final entry of table is just end pc offset. 
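Every write in makePclntable goes through byteOrder.PutUint32, i.e. fixed-width 4-byte words in the target's byte order. The same pc->funcoff serialization pattern with the standard library, assuming a little-endian target (the offsets are invented for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// two {entry, funcoff} pairs plus one end-PC sentinel, as in makePclntable
	pairs := [][2]uint32{{0x00, 24}, {0x40, 64}}
	endPC := uint32(0xa0)

	buf := make([]byte, len(pairs)*8+4)
	off := 0
	for _, p := range pairs {
		binary.LittleEndian.PutUint32(buf[off:off+4], p[0])   // entry offset
		binary.LittleEndian.PutUint32(buf[off+4:off+8], p[1]) // func info offset
		off += 8
	}
	binary.LittleEndian.PutUint32(buf[off:off+4], endPC) // final end-PC word
	fmt.Printf("% x\n", buf)
}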
- lastFunc := funcs[len(funcs)-1] - byteOrder.PutUint32(pclntab[offs:offs+4], uint32(lastFunc.entryOff+lastFuncSize)) - - // write func info table - for i, f := range funcs { - off := startLocations[i] - - // write _func structure to pclntab - fb := rt.BytesFrom(unsafe.Pointer(&f), int(_FUNC_SIZE), int(_FUNC_SIZE)) - copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb) - off += uint32(_FUNC_SIZE) - - // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 - for j := 3; j < len(pcdataOffs[i]); j++ { - byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) - off += 4 - } - - // funcdata refs as offsets from gofunc - for _, funcdata := range funcdataOffs[i] { - byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata)) - off += 4 - } - - } - - return + // Allocate space for the pc->func table. This structure consists of a pc offset + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + var size int64 = int64(len(funcs)*2*4 + 4) + var startLocations = make([]uint32, len(funcs)) + for i := range funcs { + size = rnd(size, int64(_PtrSize)) + //writePCToFunc + startLocations[i] = uint32(size) + size += int64(int(_FUNC_SIZE) + len(funcdataOffs[i])*4 + len(pcdataOffs[i])*4) + } + + pclntab = make([]byte, size, size) + + // write a map of pc->func info offsets + offs := 0 + for i, f := range funcs { + byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff)) + byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i])) + offs += 8 + } + // Final entry of table is just end pc offset. + lastFunc := funcs[len(funcs)-1] + byteOrder.PutUint32(pclntab[offs:offs+4], uint32(lastFunc.entryOff+lastFuncSize)) + + // write func info table + for i, f := range funcs { + off := startLocations[i] + + // write _func structure to pclntab + fb := rt.BytesFrom(unsafe.Pointer(&f), int(_FUNC_SIZE), int(_FUNC_SIZE)) + copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb) + off += uint32(_FUNC_SIZE) + + // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3 + for j := 3; j < len(pcdataOffs[i]); j++ { + byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j])) + off += 4 + } + + // funcdata refs as offsets from gofunc + for _, funcdata := range funcdataOffs[i] { + byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata)) + off += 4 + } + + } + + return } -// findfunc table used to map pc to belonging func, +// findfunc table used to map pc to belonging func, // returns the index in the func table. // // All text section are divided into buckets sized _BUCKETSIZE(4K): -// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), -// and it has a base idx to plus the offset stored in jth subbucket. +// +// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64), +// and it has a base idx to plus the offset stored in jth subbucket. 
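Lookup is the mirror image of what writeFindfunctab stores: findfunc in the runtime divides the PC's offset from minpc by the bucket size (4096) and the subbucket size (64), then adds the subbucket's byte delta to the bucket's base index. Roughly, as a standalone sketch (funcIndex is hypothetical, and the real findfunc treats the result only as a starting index that it then refines against ftab):

package main

import "fmt"

// findfuncbucket mirrors the struct above: a base index per 4 KiB of text,
// plus 16 one-byte deltas, one per 64-byte subbucket.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

// funcIndex sketches the runtime lookup: pick the bucket for pc, then add
// the subbucket's delta to the bucket's base index.
func funcIndex(tab []findfuncbucket, pc, minpc uint32) uint32 {
	const bucketSize, subBucketSize = 4096, 64
	x := pc - minpc
	b := tab[x/bucketSize]
	return b.idx + uint32(b.subbuckets[(x%bucketSize)/subBucketSize])
}

func main() {
	tab := []findfuncbucket{{idx: 0}, {idx: 5}}
	fmt.Println(funcIndex(tab, 4096+130, 0)) // bucket 1, subbucket 2 -> 5
}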
+// // see findfunc() in runtime/symtab.go func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) { - start = len(*out) - - max := ftab[len(ftab)-1].entry - min := ftab[0].entry - nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE - n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE - - tab := make([]findfuncbucket, 0, nbuckets) - var s, e = 0, 0 - for i := 0; i<int(nbuckets); i++ { - var pc = min + uint32((i+1)*_BUCKETSIZE) - // find the end func of the bucket - for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {} - // store the start func of the bucket - var fb = findfuncbucket{idx: uint32(s)} - - for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ { - pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE) - var ss = s - // find the end func of the subbucket - for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {} - // store the start func of the subbucket - fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) - s = ss - } - s = e - tab = append(tab, fb) - } - - // write findfuncbucket - if len(tab) > 0 { - size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab) - *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) - } - return + start = len(*out) + + max := ftab[len(ftab)-1].entry + min := ftab[0].entry + nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE + n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE + + tab := make([]findfuncbucket, 0, nbuckets) + var s, e = 0, 0 + for i := 0; i < int(nbuckets); i++ { + var pc = min + uint32((i+1)*_BUCKETSIZE) + // find the end func of the bucket + for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ { + } + // store the start func of the bucket + var fb = findfuncbucket{idx: uint32(s)} + + for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ { + pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE) + var ss = s + // find the end func of the subbucket + for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ { + } + // store the start func of the subbucket + fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx) + s = ss + } + s = e + tab = append(tab, fb) + } + + // write findfuncbucket + if len(tab) > 0 { + size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab) + *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...) 
+ } + return } func makeModuledata(name string, filenames []string, funcs []Func, text []byte) (mod *moduledata) { - mod = new(moduledata) - mod.modulename = name - - // make filename table - cu := make([]string, 0, len(filenames)) - for _, f := range filenames { - cu = append(cu, f) - } - cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) - mod.cutab = cutab - mod.filetab = filetab - - // make funcname table - funcnametab, nameOffs := makeFuncnameTab(funcs) - mod.funcnametab = funcnametab - - // make pcdata table - // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata - pctab, pcdataOffs, _funcs := makePctab(funcs, cuOffs, nameOffs) - mod.pctab = pctab - - // write func data - // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata - // TODO: estimate accurate capacity - cache := make([]byte, 0, len(funcs)*int(_PtrSize)) - fstart, funcdataOffs := writeFuncdata(&cache, funcs) - - // make pc->func (binary search) func table - lastFuncsize := funcs[len(funcs)-1].TextSize - ftab := makeFtab(_funcs, lastFuncsize) - mod.ftab = ftab - - // write pc->func (modmap) findfunc table - ffstart := writeFindfunctab(&cache, ftab) - - // make pclnt table - pclntab := makePclntable(_funcs, lastFuncsize, pcdataOffs, funcdataOffs) - mod.pclntable = pclntab - - // mmap() text and funcdata segements - p := os.Getpagesize() - size := int(rnd(int64(len(text)), int64(p))) - addr := mmap(size) - // copy the machine code - s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size) - copy(s, text) - // make it executable - mprotect(addr, size) - - // assign addresses - mod.text = addr - mod.etext = addr + uintptr(size) - mod.minpc = addr - mod.maxpc = addr + uintptr(len(text)) - - // cache funcdata and findfuncbucket - moduleCache.Lock() - moduleCache.m[mod] = cache - moduleCache.Unlock() - mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart])) - mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart])) - - // make pc header - mod.pcHeader = &pcHeader { - magic : _Magic, - minLC : _MinLC, - ptrSize : _PtrSize, - nfunc : len(funcs), - nfiles: uint(len(cu)), - textStart: mod.text, - funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"), - cuOffset: getOffsetOf(moduledata{}, "cutab"), - filetabOffset: getOffsetOf(moduledata{}, "filetab"), - pctabOffset: getOffsetOf(moduledata{}, "pctab"), - pclnOffset: getOffsetOf(moduledata{}, "pclntable"), - } - - // sepecial case: gcdata and gcbss must by non-empty - mod.gcdata = uintptr(unsafe.Pointer(&emptyByte)) - mod.gcbss = uintptr(unsafe.Pointer(&emptyByte)) - - return + mod = new(moduledata) + mod.modulename = name + + // make filename table + cu := make([]string, 0, len(filenames)) + for _, f := range filenames { + cu = append(cu, f) + } + cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}}) + mod.cutab = cutab + mod.filetab = filetab + + // make funcname table + funcnametab, nameOffs := makeFuncnameTab(funcs) + mod.funcnametab = funcnametab + + // make pcdata table + // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata + pctab, pcdataOffs, _funcs := makePctab(funcs, cuOffs, nameOffs) + mod.pctab = pctab + + // write func data + // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata + // TODO: estimate accurate capacity + cache := make([]byte, 0, len(funcs)*int(_PtrSize)) + fstart, funcdataOffs := writeFuncdata(&cache, funcs) + + // make pc->func (binary search) func table + lastFuncsize := funcs[len(funcs)-1].TextSize + ftab 
:= makeFtab(_funcs, lastFuncsize)
+ mod.ftab = ftab
+
+ // write pc->func (modmap) findfunc table
+ ffstart := writeFindfunctab(&cache, ftab)
+
+ // make pclnt table
+ pclntab := makePclntable(_funcs, lastFuncsize, pcdataOffs, funcdataOffs)
+ mod.pclntable = pclntab
+
+ // mmap() text and funcdata segments
+ p := os.Getpagesize()
+ size := int(rnd(int64(len(text)), int64(p)))
+ addr := mmap(size)
+ // copy the machine code
+ s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
+ copy(s, text)
+ // make it executable
+ mprotect(addr, size)
+
+ // assign addresses
+ mod.text = addr
+ mod.etext = addr + uintptr(size)
+ mod.minpc = addr
+ mod.maxpc = addr + uintptr(len(text))
+
+ // cache funcdata and findfuncbucket
+ moduleCache.Lock()
+ moduleCache.m[mod] = cache
+ moduleCache.Unlock()
+ mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart]))
+ mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart]))
+
+ // make pc header
+ mod.pcHeader = &pcHeader{
+ magic: _Magic,
+ minLC: _MinLC,
+ ptrSize: _PtrSize,
+ nfunc: len(funcs),
+ nfiles: uint(len(cu)),
+ textStart: mod.text,
+ funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
+ cuOffset: getOffsetOf(moduledata{}, "cutab"),
+ filetabOffset: getOffsetOf(moduledata{}, "filetab"),
+ pctabOffset: getOffsetOf(moduledata{}, "pctab"),
+ pclnOffset: getOffsetOf(moduledata{}, "pclntable"),
+ }
+
+ // special case: gcdata and gcbss must be non-empty
+ mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
+ mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
+
+ return
 }

// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind pcdata in the table.
func makePctab(funcs []Func, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
- _funcs = make([]_func, len(funcs))
-
- // Pctab offsets of 0 are considered invalid in the runtime. We respect
- // that by just padding a single byte at the beginning of runtime.pctab,
- // that way no real offsets can be zero.
- pctab = make([]byte, 1, 12*len(funcs)+1)
- pcdataOffs = make([][]uint32, len(funcs))
-
- for i, f := range funcs {
- _f := &_funcs[i]
-
- var writer = func(pc *Pcdata) {
- var ab []byte
- var err error
- if pc != nil {
- ab, err = pc.MarshalBinary()
- if err != nil {
- panic(err)
- }
- pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab)))
- } else {
- ab = []byte{0}
- pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET)
- }
- pctab = append(pctab, ab...)
- }
-
- if f.Pcsp != nil {
- _f.pcsp = uint32(len(pctab))
- }
- writer(f.Pcsp)
- if f.Pcfile != nil {
- _f.pcfile = uint32(len(pctab))
- }
- writer(f.Pcfile)
- if f.Pcline != nil {
- _f.pcln = uint32(len(pctab))
- }
- writer(f.Pcline)
- writer(f.PcUnsafePoint)
- writer(f.PcStackMapIndex)
- writer(f.PcInlTreeIndex)
- writer(f.PcArgLiveIndex)
-
- _f.entryOff = f.EntryOff
- _f.nameOff = nameOffset[i]
- _f.args = f.ArgsSize
- _f.deferreturn = f.DeferReturn
- // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)]
- _f.npcdata = uint32(_N_PCDATA)
- _f.cuOffset = cuOffset[i]
- _f.funcID = f.ID
- _f.flag = f.Flag
- _f.nfuncdata = uint8(_N_FUNCDATA)
- }
-
- return
+ _funcs = make([]_func, len(funcs))
+
+ // Pctab offsets of 0 are considered invalid in the runtime. We respect
+ // that by just padding a single byte at the beginning of runtime.pctab,
+ // that way no real offsets can be zero.
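The single pad byte described in the comment above is load-bearing: the runtime reserves pcdata offset 0 for "no table", so every real table must start at offset 1 or later. A tiny demonstration of the invariant (the two short byte slices stand in for marshaled Pcdata blobs):

package main

import "fmt"

func main() {
	// start with one pad byte so no real table can begin at offset 0
	pctab := make([]byte, 1)

	var offs []uint32
	for _, table := range [][]byte{{0x02, 0x08}, {0x04, 0x10}} {
		offs = append(offs, uint32(len(pctab))) // never 0, thanks to the pad
		pctab = append(pctab, table...)
	}
	fmt.Println(offs) // [1 3]: both offsets are safely nonzero
}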
+ pctab = make([]byte, 1, 12*len(funcs)+1) + pcdataOffs = make([][]uint32, len(funcs)) + + for i, f := range funcs { + _f := &_funcs[i] + + var writer = func(pc *Pcdata) { + var ab []byte + var err error + if pc != nil { + ab, err = pc.MarshalBinary() + if err != nil { + panic(err) + } + pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab))) + } else { + ab = []byte{0} + pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET) + } + pctab = append(pctab, ab...) + } + + if f.Pcsp != nil { + _f.pcsp = uint32(len(pctab)) + } + writer(f.Pcsp) + if f.Pcfile != nil { + _f.pcfile = uint32(len(pctab)) + } + writer(f.Pcfile) + if f.Pcline != nil { + _f.pcln = uint32(len(pctab)) + } + writer(f.Pcline) + writer(f.PcUnsafePoint) + writer(f.PcStackMapIndex) + writer(f.PcInlTreeIndex) + writer(f.PcArgLiveIndex) + + _f.entryOff = f.EntryOff + _f.nameOff = nameOffset[i] + _f.args = f.ArgsSize + _f.deferreturn = f.DeferReturn + // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)] + _f.npcdata = uint32(_N_PCDATA) + _f.cuOffset = cuOffset[i] + _f.funcID = f.ID + _f.flag = f.Flag + _f.nfuncdata = uint8(_N_FUNCDATA) + } + + return } -func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {} \ No newline at end of file +func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) { +} diff --git a/vendor/github.com/bytedance/sonic/loader/loader.go b/vendor/github.com/bytedance/sonic/loader/loader.go index 929d8c23d..8bfd81d35 100644 --- a/vendor/github.com/bytedance/sonic/loader/loader.go +++ b/vendor/github.com/bytedance/sonic/loader/loader.go @@ -17,7 +17,7 @@ package loader import ( - `unsafe` + "unsafe" ) // Function is a function pointer @@ -25,13 +25,13 @@ type Function unsafe.Pointer // Options used to load a module type Options struct { - // NoPreempt is used to disable async preemption for this module - NoPreempt bool + // NoPreempt is used to disable async preemption for this module + NoPreempt bool } // Loader is a helper used to load a module simply type Loader struct { - Name string // module name - File string // file name - Options -} \ No newline at end of file + Name string // module name + File string // file name + Options +} diff --git a/vendor/github.com/bytedance/sonic/loader/loader_go115.go b/vendor/github.com/bytedance/sonic/loader/loader_go115.go index a1d4d7892..6f060277b 100644 --- a/vendor/github.com/bytedance/sonic/loader/loader_go115.go +++ b/vendor/github.com/bytedance/sonic/loader/loader_go115.go @@ -20,9 +20,9 @@ package loader import ( - `github.com/bytedance/sonic/internal/loader` + "github.com/bytedance/sonic/internal/loader" ) func (self Loader) LoadOne(text []byte, funcName string, frameSize int, argSize int, argStackmap []bool, localStackmap []bool) Function { - return Function(loader.Loader(text).Load(funcName, frameSize, argSize, argStackmap, localStackmap)) -} \ No newline at end of file + return Function(loader.Loader(text).Load(funcName, frameSize, argSize, argStackmap, localStackmap)) +} diff --git a/vendor/github.com/bytedance/sonic/loader/loader_go116.go b/vendor/github.com/bytedance/sonic/loader/loader_go116.go index ea30ec9a5..2e9463801 100644 --- a/vendor/github.com/bytedance/sonic/loader/loader_go116.go +++ b/vendor/github.com/bytedance/sonic/loader/loader_go116.go @@ -20,85 +20,85 @@ package loader import ( - `github.com/bytedance/sonic/internal/rt` + 
"github.com/bytedance/sonic/internal/rt" ) // LoadFuncs loads only one function as module, and returns the function pointer // - text: machine code // - funcName: function name -// - frameSize: stack frame size. +// - frameSize: stack frame size. // - argSize: argument total size (in bytes) // - argPtrs: indicates if a slot (8 Bytes) of arguments memory stores pointer, from low to high // - localPtrs: indicates if a slot (8 Bytes) of local variants memory stores pointer, from low to high -// -// WARN: +// +// WARN: // - the function MUST has fixed SP offset equaling to this, otherwise it go.gentraceback will fail // - the function MUST has only one stack map for all arguments and local variants func (self Loader) LoadOne(text []byte, funcName string, frameSize int, argSize int, argPtrs []bool, localPtrs []bool) Function { - size := uint32(len(text)) + size := uint32(len(text)) - fn := Func{ - Name: funcName, - TextSize: size, - ArgsSize: int32(argSize), - } + fn := Func{ + Name: funcName, + TextSize: size, + ArgsSize: int32(argSize), + } - // NOTICE: suppose the function has fixed SP offset equaling to frameSize, thus make only one pcsp pair - fn.Pcsp = &Pcdata{ - {PC: size, Val: int32(frameSize)}, - } + // NOTICE: suppose the function has fixed SP offset equaling to frameSize, thus make only one pcsp pair + fn.Pcsp = &Pcdata{ + {PC: size, Val: int32(frameSize)}, + } - if self.NoPreempt { - fn.PcUnsafePoint = &Pcdata{ - {PC: size, Val: PCDATA_UnsafePointUnsafe}, - } - } else { - fn.PcUnsafePoint = &Pcdata{ - {PC: size, Val: PCDATA_UnsafePointSafe}, - } - } + if self.NoPreempt { + fn.PcUnsafePoint = &Pcdata{ + {PC: size, Val: PCDATA_UnsafePointUnsafe}, + } + } else { + fn.PcUnsafePoint = &Pcdata{ + {PC: size, Val: PCDATA_UnsafePointSafe}, + } + } - // NOTICE: suppose the function has only one stack map at index 0 - fn.PcStackMapIndex = &Pcdata{ - {PC: size, Val: 0}, - } + // NOTICE: suppose the function has only one stack map at index 0 + fn.PcStackMapIndex = &Pcdata{ + {PC: size, Val: 0}, + } - if argPtrs != nil { - args := rt.StackMapBuilder{} - for _, b := range argPtrs { - args.AddField(b) - } - fn.ArgsPointerMaps = args.Build() - } - - if localPtrs != nil { - locals := rt .StackMapBuilder{} - for _, b := range localPtrs { - locals.AddField(b) - } - fn.LocalsPointerMaps = locals.Build() - } + if argPtrs != nil { + args := rt.StackMapBuilder{} + for _, b := range argPtrs { + args.AddField(b) + } + fn.ArgsPointerMaps = args.Build() + } - out := Load(text, []Func{fn}, self.Name + funcName, []string{self.File}) - return out[0] + if localPtrs != nil { + locals := rt.StackMapBuilder{} + for _, b := range localPtrs { + locals.AddField(b) + } + fn.LocalsPointerMaps = locals.Build() + } + + out := Load(text, []Func{fn}, self.Name+funcName, []string{self.File}) + return out[0] } // Load loads given machine codes and corresponding function information into go moduledata // and returns runnable function pointer // WARN: this API is experimental, use it carefully func Load(text []byte, funcs []Func, modulename string, filenames []string) (out []Function) { - // generate module data and allocate memory address - mod := makeModuledata(modulename, filenames, funcs, text) + // generate module data and allocate memory address + mod := makeModuledata(modulename, filenames, funcs, text) - // verify and register the new module - moduledataverify1(mod) - registerModule(mod) + // verify and register the new module + moduledataverify1(mod) + registerModule(mod) - // encapsulate function address - out = 
make([]Function, len(funcs)) - for i, f := range funcs { - m := uintptr(mod.text + uintptr(f.EntryOff)) - out[i] = Function(&m) - } - return -} \ No newline at end of file + // encapsulate function address + out = make([]Function, len(funcs)) + for i, f := range funcs { + m := uintptr(mod.text + uintptr(f.EntryOff)) + out[i] = Function(&m) + } + return +} diff --git a/vendor/github.com/bytedance/sonic/loader/mmap_unix.go b/vendor/github.com/bytedance/sonic/loader/mmap_unix.go index 50b80bf20..8b128d120 100644 --- a/vendor/github.com/bytedance/sonic/loader/mmap_unix.go +++ b/vendor/github.com/bytedance/sonic/loader/mmap_unix.go @@ -20,26 +20,25 @@ package loader import ( - `syscall` + "syscall" ) const ( - _AP = syscall.MAP_ANON | syscall.MAP_PRIVATE - _RX = syscall.PROT_READ | syscall.PROT_EXEC - _RW = syscall.PROT_READ | syscall.PROT_WRITE + _AP = syscall.MAP_ANON | syscall.MAP_PRIVATE + _RX = syscall.PROT_READ | syscall.PROT_EXEC + _RW = syscall.PROT_READ | syscall.PROT_WRITE ) - func mmap(nb int) uintptr { - if m, _, e := syscall.RawSyscall6(syscall.SYS_MMAP, 0, uintptr(nb), _RW, _AP, 0, 0); e != 0 { - panic(e) - } else { - return m - } + if m, _, e := syscall.RawSyscall6(syscall.SYS_MMAP, 0, uintptr(nb), _RW, _AP, 0, 0); e != 0 { + panic(e) + } else { + return m + } } func mprotect(p uintptr, nb int) { - if _, _, err := syscall.RawSyscall(syscall.SYS_MPROTECT, p, uintptr(nb), _RX); err != 0 { - panic(err) - } -} \ No newline at end of file + if _, _, err := syscall.RawSyscall(syscall.SYS_MPROTECT, p, uintptr(nb), _RX); err != 0 { + panic(err) + } +} diff --git a/vendor/github.com/bytedance/sonic/loader/mmap_windows.go b/vendor/github.com/bytedance/sonic/loader/mmap_windows.go index 1760a7117..322f3025e 100644 --- a/vendor/github.com/bytedance/sonic/loader/mmap_windows.go +++ b/vendor/github.com/bytedance/sonic/loader/mmap_windows.go @@ -22,63 +22,63 @@ package loader import ( - `syscall` - `unsafe` + "syscall" + "unsafe" ) const ( - MEM_COMMIT = 0x00001000 - MEM_RESERVE = 0x00002000 + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 ) var ( - libKernel32 = syscall.NewLazyDLL("KERNEL32.DLL") - libKernel32_VirtualAlloc = libKernel32.NewProc("VirtualAlloc") - libKernel32_VirtualProtect = libKernel32.NewProc("VirtualProtect") + libKernel32 = syscall.NewLazyDLL("KERNEL32.DLL") + libKernel32_VirtualAlloc = libKernel32.NewProc("VirtualAlloc") + libKernel32_VirtualProtect = libKernel32.NewProc("VirtualProtect") ) func mmap(nb int) uintptr { - addr, err := winapi_VirtualAlloc(0, nb, MEM_COMMIT|MEM_RESERVE, syscall.PAGE_READWRITE) - if err != nil { - panic(err) - } - return addr + addr, err := winapi_VirtualAlloc(0, nb, MEM_COMMIT|MEM_RESERVE, syscall.PAGE_READWRITE) + if err != nil { + panic(err) + } + return addr } func mprotect(p uintptr, nb int) (oldProtect int) { - err := winapi_VirtualProtect(p, nb, syscall.PAGE_EXECUTE_READ, &oldProtect) - if err != nil { - panic(err) - } - return + err := winapi_VirtualProtect(p, nb, syscall.PAGE_EXECUTE_READ, &oldProtect) + if err != nil { + panic(err) + } + return } // winapi_VirtualAlloc allocates memory // Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc func winapi_VirtualAlloc(lpAddr uintptr, dwSize int, flAllocationType int, flProtect int) (uintptr, error) { - r1, _, err := libKernel32_VirtualAlloc.Call( - lpAddr, - uintptr(dwSize), - uintptr(flAllocationType), - uintptr(flProtect), - ) - if r1 == 0 { - return 0, err - } - return r1, nil + r1, _, err := libKernel32_VirtualAlloc.Call( + lpAddr, +
uintptr(dwSize), + uintptr(flAllocationType), + uintptr(flProtect), + ) + if r1 == 0 { + return 0, err + } + return r1, nil } // winapi_VirtualProtect changes memory protection // Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualprotect func winapi_VirtualProtect(lpAddr uintptr, dwSize int, flNewProtect int, lpflOldProtect *int) error { - r1, _, err := libKernel32_VirtualProtect.Call( - lpAddr, - uintptr(dwSize), - uintptr(flNewProtect), - uintptr(unsafe.Pointer(lpflOldProtect)), - ) - if r1 == 0 { - return err - } - return nil + r1, _, err := libKernel32_VirtualProtect.Call( + lpAddr, + uintptr(dwSize), + uintptr(flNewProtect), + uintptr(unsafe.Pointer(lpflOldProtect)), + ) + if r1 == 0 { + return err + } + return nil } diff --git a/vendor/github.com/bytedance/sonic/loader/pcdata.go b/vendor/github.com/bytedance/sonic/loader/pcdata.go index b5c62d17b..8e0ecf288 100644 --- a/vendor/github.com/bytedance/sonic/loader/pcdata.go +++ b/vendor/github.com/bytedance/sonic/loader/pcdata.go @@ -17,84 +17,84 @@ package loader const ( - _N_PCDATA = 4 + _N_PCDATA = 4 - _PCDATA_UnsafePoint = 0 - _PCDATA_StackMapIndex = 1 - _PCDATA_InlTreeIndex = 2 - _PCDATA_ArgLiveIndex = 3 + _PCDATA_UnsafePoint = 0 + _PCDATA_StackMapIndex = 1 + _PCDATA_InlTreeIndex = 2 + _PCDATA_ArgLiveIndex = 3 - _PCDATA_INVALID_OFFSET = 0 + _PCDATA_INVALID_OFFSET = 0 ) const ( - // PCDATA_UnsafePoint values. - PCDATA_UnsafePointSafe = -1 // Safe for async preemption - PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption - - // PCDATA_Restart1(2) apply on a sequence of instructions, within - // which if an async preemption happens, we should back off the PC - // to the start of the sequence when resume. - // We need two so we can distinguish the start/end of the sequence - // in case that two sequences are next to each other. - PCDATA_Restart1 = -3 - PCDATA_Restart2 = -4 - - // Like PCDATA_RestartAtEntry, but back to function entry if async - // preempted. - PCDATA_RestartAtEntry = -5 - - _PCDATA_START_VAL = -1 + // PCDATA_UnsafePoint values. + PCDATA_UnsafePointSafe = -1 // Safe for async preemption + PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption + + // PCDATA_Restart1(2) apply on a sequence of instructions, within + // which if an async preemption happens, we should back off the PC + // to the start of the sequence when resuming. + // We need two so we can distinguish the start/end of the sequence + // in case that two sequences are next to each other. + PCDATA_Restart1 = -3 + PCDATA_Restart2 = -4 + + // Like PCDATA_Restart1, but back to function entry if async + // preempted.
+ PCDATA_RestartAtEntry = -5 + + _PCDATA_START_VAL = -1 ) var emptyByte byte func encodeValue(v int) []byte { - return encodeVariant(toZigzag(v)) + return encodeVariant(toZigzag(v)) } func toZigzag(v int) int { - return (v << 1) ^ (v >> 31) + return (v << 1) ^ (v >> 31) } func encodeVariant(v int) []byte { - var u int - var r []byte - - /* split every 7 bits */ - for v > 127 { - u = v & 0x7f - v = v >> 7 - r = append(r, byte(u) | 0x80) - } - - /* check for last one */ - if v == 0 { - return r - } - - /* add the last one */ - r = append(r, byte(v)) - return r + var u int + var r []byte + + /* split every 7 bits */ + for v > 127 { + u = v & 0x7f + v = v >> 7 + r = append(r, byte(u)|0x80) + } + + /* check for last one */ + if v == 0 { + return r + } + + /* add the last one */ + r = append(r, byte(v)) + return r } type Pcvalue struct { - PC uint32 // PC offset from func entry - Val int32 + PC uint32 // PC offset from func entry + Val int32 } type Pcdata []Pcvalue // see https://docs.google.com/document/d/1lyPIbmsYbXnpNj57a261hgOYVpNRcgydurVQIyZOz_o/pub func (self Pcdata) MarshalBinary() (data []byte, err error) { - // delta value always starts from -1 - sv := int32(_PCDATA_START_VAL) - sp := uint32(0) - for _, v := range self { - data = append(data, encodeVariant(toZigzag(int(v.Val - sv)))...) - data = append(data, encodeVariant(int(v.PC - sp))...) - sp = v.PC - sv = v.Val - } - return -} \ No newline at end of file + // delta value always starts from -1 + sv := int32(_PCDATA_START_VAL) + sp := uint32(0) + for _, v := range self { + data = append(data, encodeVariant(toZigzag(int(v.Val-sv)))...) + data = append(data, encodeVariant(int(v.PC-sp))...) + sp = v.PC + sv = v.Val + } + return +} diff --git a/vendor/github.com/bytedance/sonic/loader/stubs.go b/vendor/github.com/bytedance/sonic/loader/stubs.go index 8377649b7..11472c6e0 100644 --- a/vendor/github.com/bytedance/sonic/loader/stubs.go +++ b/vendor/github.com/bytedance/sonic/loader/stubs.go @@ -17,8 +17,8 @@ package loader import ( - `sync` - _ `unsafe` + "sync" + _ "unsafe" ) //go:linkname lastmoduledatap runtime.lastmoduledatap @@ -28,13 +28,11 @@ var lastmoduledatap *moduledata var moduledataMux sync.Mutex func registerModule(mod *moduledata) { - moduledataMux.Lock() - lastmoduledatap.next = mod - lastmoduledatap = mod - moduledataMux.Unlock() + moduledataMux.Lock() + lastmoduledatap.next = mod + lastmoduledatap = mod + moduledataMux.Unlock() } //go:linkname moduledataverify1 runtime.moduledataverify1 func moduledataverify1(_ *moduledata) - - diff --git a/vendor/github.com/bytedance/sonic/option/option.go b/vendor/github.com/bytedance/sonic/option/option.go index 71527cdf0..c8b1aa152 100644 --- a/vendor/github.com/bytedance/sonic/option/option.go +++ b/vendor/github.com/bytedance/sonic/option/option.go @@ -17,70 +17,69 @@ package option var ( - // DefaultDecoderBufferSize is the initial buffer size of StreamDecoder - DefaultDecoderBufferSize uint = 128 * 1024 + // DefaultDecoderBufferSize is the initial buffer size of StreamDecoder + DefaultDecoderBufferSize uint = 128 * 1024 - // DefaultEncoderBufferSize is the initial buffer size of Encoder - DefaultEncoderBufferSize uint = 128 * 1024 + // DefaultEncoderBufferSize is the initial buffer size of Encoder + DefaultEncoderBufferSize uint = 128 * 1024 ) // CompileOptions includes all options for encoder or decoder compiler. 
type CompileOptions struct { - // the maximum depth for compilation inline - MaxInlineDepth int + // the maximum depth for inline compilation + MaxInlineDepth int - // the loop times for recursively pretouch - RecursiveDepth int + // the loop count for recursive pretouch + RecursiveDepth int } var ( - // Default value(3) means the compiler only inline 3 layers of nested struct. - // when the depth exceeds, the compiler will recurse - // and compile subsequent structs when they are decoded - DefaultMaxInlineDepth = 3 + // Default value (3) means the compiler only inlines 3 layers of nested structs; + // when the depth is exceeded, the compiler will recurse + // and compile subsequent structs as they are decoded + DefaultMaxInlineDepth = 3 - // Default value(1) means `Pretouch()` will be recursively executed once, - // if any nested struct is left (depth exceeds MaxInlineDepth) - DefaultRecursiveDepth = 1 + // Default value (1) means `Pretouch()` will be executed recursively once + // if any nested struct is left (depth exceeds MaxInlineDepth) + DefaultRecursiveDepth = 1 ) // DefaultCompileOptions sets default compile options. func DefaultCompileOptions() CompileOptions { - return CompileOptions{ - RecursiveDepth: DefaultRecursiveDepth, - MaxInlineDepth: DefaultMaxInlineDepth, - } + return CompileOptions{ + RecursiveDepth: DefaultRecursiveDepth, + MaxInlineDepth: DefaultMaxInlineDepth, + } } // CompileOption is a function used to change DefaultCompileOptions. type CompileOption func(o *CompileOptions) -// WithCompileRecursiveDepth sets the loop times of recursive pretouch +// WithCompileRecursiveDepth sets the loop count of recursive pretouch // in both decoder and encoder, // for both a concrete type and its pointer type. // -// For deep nested struct (depth exceeds MaxInlineDepth), -// try to set more loops to completely compile, +// For deeply nested structs (depth exceeding MaxInlineDepth), +// try more loops to compile completely, // thus reducing JIT instability on the first hit. func WithCompileRecursiveDepth(loop int) CompileOption { - return func(o *CompileOptions) { - if loop < 0 { - panic("loop must be >= 0") - } - o.RecursiveDepth = loop - } + return func(o *CompileOptions) { + if loop < 0 { + panic("loop must be >= 0") + } + o.RecursiveDepth = loop + } } -// WithCompileMaxInlineDepth sets the max depth of inline compile +// WithCompileMaxInlineDepth sets the max depth of inline compilation // in decoder and encoder. // // For large nested structs, try a smaller depth to reduce compile time.
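// A hedged usage sketch (only names defined above are assumed): a
// CompileOption is an ordinary function over *CompileOptions, so it can be
// applied by hand as well as passed variadically to APIs such as Pretouch:
//
//	o := DefaultCompileOptions()
//	WithCompileRecursiveDepth(2)(&o)
//	WithCompileMaxInlineDepth(4)(&o)
//	// o.RecursiveDepth == 2, o.MaxInlineDepth == 4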
func WithCompileMaxInlineDepth(depth int) CompileOption { - return func(o *CompileOptions) { - if depth <= 0 { - panic("depth must be > 0") - } - o.MaxInlineDepth = depth - } + return func(o *CompileOptions) { + if depth <= 0 { + panic("depth must be > 0") + } + o.MaxInlineDepth = depth + } } - \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/sonic.go b/vendor/github.com/bytedance/sonic/sonic.go index 6cbb1ad41..4aa81eb57 100644 --- a/vendor/github.com/bytedance/sonic/sonic.go +++ b/vendor/github.com/bytedance/sonic/sonic.go @@ -1,3 +1,4 @@ +//go:build amd64 && go1.15 && !go1.21 // +build amd64,go1.15,!go1.21 /* @@ -20,117 +21,117 @@ package sonic import ( - `io` - `reflect` + "io" + "reflect" - `github.com/bytedance/sonic/decoder` - `github.com/bytedance/sonic/encoder` - `github.com/bytedance/sonic/option` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/decoder" + "github.com/bytedance/sonic/encoder" + "github.com/bytedance/sonic/internal/rt" + "github.com/bytedance/sonic/option" ) type frozenConfig struct { - Config - encoderOpts encoder.Options - decoderOpts decoder.Options + Config + encoderOpts encoder.Options + decoderOpts decoder.Options } // Froze convert the Config to API func (cfg Config) Froze() API { - api := &frozenConfig{Config: cfg} - - // configure encoder options: - if cfg.EscapeHTML { - api.encoderOpts |= encoder.EscapeHTML - } - if cfg.SortMapKeys { - api.encoderOpts |= encoder.SortMapKeys - } - if cfg.CompactMarshaler { - api.encoderOpts |= encoder.CompactMarshaler - } - if cfg.NoQuoteTextMarshaler { - api.encoderOpts |= encoder.NoQuoteTextMarshaler - } - if cfg.NoNullSliceOrMap { - api.encoderOpts |= encoder.NoNullSliceOrMap - } - if cfg.ValidateString { - api.encoderOpts |= encoder.ValidateString - } - - // configure decoder options: - if cfg.UseInt64 { - api.decoderOpts |= decoder.OptionUseInt64 - } - if cfg.UseNumber { - api.decoderOpts |= decoder.OptionUseNumber - } - if cfg.DisallowUnknownFields { - api.decoderOpts |= decoder.OptionDisableUnknown - } - if cfg.CopyString { - api.decoderOpts |= decoder.OptionCopyString - } - if cfg.ValidateString { - api.decoderOpts |= decoder.OptionValidateString - } - return api + api := &frozenConfig{Config: cfg} + + // configure encoder options: + if cfg.EscapeHTML { + api.encoderOpts |= encoder.EscapeHTML + } + if cfg.SortMapKeys { + api.encoderOpts |= encoder.SortMapKeys + } + if cfg.CompactMarshaler { + api.encoderOpts |= encoder.CompactMarshaler + } + if cfg.NoQuoteTextMarshaler { + api.encoderOpts |= encoder.NoQuoteTextMarshaler + } + if cfg.NoNullSliceOrMap { + api.encoderOpts |= encoder.NoNullSliceOrMap + } + if cfg.ValidateString { + api.encoderOpts |= encoder.ValidateString + } + + // configure decoder options: + if cfg.UseInt64 { + api.decoderOpts |= decoder.OptionUseInt64 + } + if cfg.UseNumber { + api.decoderOpts |= decoder.OptionUseNumber + } + if cfg.DisallowUnknownFields { + api.decoderOpts |= decoder.OptionDisableUnknown + } + if cfg.CopyString { + api.decoderOpts |= decoder.OptionCopyString + } + if cfg.ValidateString { + api.decoderOpts |= decoder.OptionValidateString + } + return api } // Marshal is implemented by sonic func (cfg frozenConfig) Marshal(val interface{}) ([]byte, error) { - return encoder.Encode(val, cfg.encoderOpts) + return encoder.Encode(val, cfg.encoderOpts) } // MarshalToString is implemented by sonic func (cfg frozenConfig) MarshalToString(val interface{}) (string, error) { - buf, err := encoder.Encode(val, cfg.encoderOpts) - return 
rt.Mem2Str(buf), err + buf, err := encoder.Encode(val, cfg.encoderOpts) + return rt.Mem2Str(buf), err } // MarshalIndent is implemented by sonic func (cfg frozenConfig) MarshalIndent(val interface{}, prefix, indent string) ([]byte, error) { - return encoder.EncodeIndented(val, prefix, indent, cfg.encoderOpts) + return encoder.EncodeIndented(val, prefix, indent, cfg.encoderOpts) } // UnmarshalFromString is implemented by sonic func (cfg frozenConfig) UnmarshalFromString(buf string, val interface{}) error { - dec := decoder.NewDecoder(buf) - dec.SetOptions(cfg.decoderOpts) - err := dec.Decode(val) + dec := decoder.NewDecoder(buf) + dec.SetOptions(cfg.decoderOpts) + err := dec.Decode(val) - /* check for errors */ - if err != nil { - return err - } + /* check for errors */ + if err != nil { + return err + } - return dec.CheckTrailings() + return dec.CheckTrailings() } // Unmarshal is implemented by sonic func (cfg frozenConfig) Unmarshal(buf []byte, val interface{}) error { - return cfg.UnmarshalFromString(string(buf), val) + return cfg.UnmarshalFromString(string(buf), val) } // NewEncoder is implemented by sonic func (cfg frozenConfig) NewEncoder(writer io.Writer) Encoder { - enc := encoder.NewStreamEncoder(writer) - enc.Opts = cfg.encoderOpts - return enc + enc := encoder.NewStreamEncoder(writer) + enc.Opts = cfg.encoderOpts + return enc } // NewDecoder is implemented by sonic func (cfg frozenConfig) NewDecoder(reader io.Reader) Decoder { - dec := decoder.NewStreamDecoder(reader) - dec.SetOptions(cfg.decoderOpts) - return dec + dec := decoder.NewStreamDecoder(reader) + dec.SetOptions(cfg.decoderOpts) + return dec } // Valid is implemented by sonic func (cfg frozenConfig) Valid(data []byte) bool { - ok, _ := encoder.Valid(data) - return ok + ok, _ := encoder.Valid(data) + return ok } // Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in @@ -140,8 +141,8 @@ func (cfg frozenConfig) Valid(data []byte) bool { // a compile option to set the depth of recursive compile for the nested struct type. 
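// A typical call is a one-time warm-up at startup (a sketch; MyStruct
// stands in for any user-defined type):
//
//	err := sonic.Pretouch(reflect.TypeOf(MyStruct{}),
//		option.WithCompileRecursiveDepth(2))
//
// which pays the JIT compilation cost once up front rather than on the
// first Marshal/Unmarshal of that type.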
func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { if err := encoder.Pretouch(vt, opts...); err != nil { - return err - } + return err + } if err := decoder.Pretouch(vt, opts...); err != nil { return err } @@ -152,8 +153,8 @@ func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { vt = reflect.PtrTo(vt) } if err := encoder.Pretouch(vt, opts...); err != nil { - return err - } + return err + } if err := decoder.Pretouch(vt, opts...); err != nil { return err } diff --git a/vendor/github.com/bytedance/sonic/unquote/unquote.go b/vendor/github.com/bytedance/sonic/unquote/unquote.go index 23fca736e..690c0fb01 100644 --- a/vendor/github.com/bytedance/sonic/unquote/unquote.go +++ b/vendor/github.com/bytedance/sonic/unquote/unquote.go @@ -17,43 +17,43 @@ package unquote import ( - `unsafe` - `runtime` + "runtime" + "unsafe" - `github.com/bytedance/sonic/internal/native` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/rt` + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) func String(s string) (ret string, err types.ParsingError) { - mm := make([]byte, 0, len(s)) - err = intoBytesUnsafe(s, &mm) - ret = rt.Mem2Str(mm) - return + mm := make([]byte, 0, len(s)) + err = intoBytesUnsafe(s, &mm) + ret = rt.Mem2Str(mm) + return } func IntoBytes(s string, m *[]byte) types.ParsingError { - if cap(*m) < len(s) { - return types.ERR_EOF - } else { - return intoBytesUnsafe(s, m) - } + if cap(*m) < len(s) { + return types.ERR_EOF + } else { + return intoBytesUnsafe(s, m) + } } func intoBytesUnsafe(s string, m *[]byte) types.ParsingError { - pos := -1 - slv := (*rt.GoSlice)(unsafe.Pointer(m)) - str := (*rt.GoString)(unsafe.Pointer(&s)) - /* unquote as the default configuration, replace invalid unicode with \ufffd */ - ret := native.Unquote(str.Ptr, str.Len, slv.Ptr, &pos, types.F_UNICODE_REPLACE) - - /* check for errors */ - if ret < 0 { - return types.ParsingError(-ret) - } - - /* update the length */ - slv.Len = ret - runtime.KeepAlive(s) - return 0 + pos := -1 + slv := (*rt.GoSlice)(unsafe.Pointer(m)) + str := (*rt.GoString)(unsafe.Pointer(&s)) + /* unquote as the default configuration, replace invalid unicode with \ufffd */ + ret := native.Unquote(str.Ptr, str.Len, slv.Ptr, &pos, types.F_UNICODE_REPLACE) + + /* check for errors */ + if ret < 0 { + return types.ParsingError(-ret) + } + + /* update the length */ + slv.Len = ret + runtime.KeepAlive(s) + return 0 } diff --git a/vendor/github.com/bytedance/sonic/utf8/utf8.go b/vendor/github.com/bytedance/sonic/utf8/utf8.go index 59d2caefe..b60bda722 100644 --- a/vendor/github.com/bytedance/sonic/utf8/utf8.go +++ b/vendor/github.com/bytedance/sonic/utf8/utf8.go @@ -17,55 +17,55 @@ package utf8 import ( - `github.com/bytedance/sonic/internal/rt` - `github.com/bytedance/sonic/internal/native/types` - `github.com/bytedance/sonic/internal/native` + "github.com/bytedance/sonic/internal/native" + "github.com/bytedance/sonic/internal/native/types" + "github.com/bytedance/sonic/internal/rt" ) // CorrectWith corrects the invalid utf8 byte with repl string. 
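// For example (a sketch), replacing a lone invalid byte with U+FFFD:
//
//	out := CorrectWith(nil, []byte("a\xffb"), "\ufffd")
//	// out == []byte("a\ufffdb")
//
// dst may be nil or a reusable buffer; corrected output is appended to it.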
func CorrectWith(dst []byte, src []byte, repl string) []byte { - sstr := rt.Mem2Str(src) - sidx := 0 + sstr := rt.Mem2Str(src) + sidx := 0 - /* state machine records the invalid postions */ - m := types.NewStateMachine() - m.Sp = 0 // invalid utf8 numbers + /* state machine records the invalid positions */ + m := types.NewStateMachine() + m.Sp = 0 // number of invalid utf8 positions - for sidx < len(sstr) { - scur := sidx - ecode := native.ValidateUTF8(&sstr, &sidx, m) + for sidx < len(sstr) { + scur := sidx + ecode := native.ValidateUTF8(&sstr, &sidx, m) - if m.Sp != 0 { - if m.Sp > len(sstr) { - panic("numbers of invalid utf8 exceed the string len!") - } - } - - for i := 0; i < m.Sp; i++ { - ipos := m.Vt[i] // invalid utf8 position - dst = append(dst, sstr[scur:ipos]...) - dst = append(dst, repl...) - scur = m.Vt[i] + 1 - } - /* append the remained valid utf8 bytes */ - dst = append(dst, sstr[scur:sidx]...) + if m.Sp != 0 { + if m.Sp > len(sstr) { + panic("numbers of invalid utf8 exceed the string len!") + } + } - /* not enough space, reset and continue */ - if ecode != 0 { - m.Sp = 0 - } - } + for i := 0; i < m.Sp; i++ { + ipos := m.Vt[i] // invalid utf8 position + dst = append(dst, sstr[scur:ipos]...) + dst = append(dst, repl...) + scur = m.Vt[i] + 1 + } + /* append the remaining valid utf8 bytes */ + dst = append(dst, sstr[scur:sidx]...) - types.FreeStateMachine(m) - return dst + /* not enough space, reset and continue */ + if ecode != 0 { + m.Sp = 0 + } + } + + types.FreeStateMachine(m) + return dst } // Validate is a simd-accelerated drop-in replacement for the standard library's utf8.Valid. func Validate(src []byte) bool { - return ValidateString(rt.Mem2Str(src)) + return ValidateString(rt.Mem2Str(src)) } // ValidateString is like Validate, but for strings. func ValidateString(src string) bool { - return native.ValidateUTF8Fast(&src) == 0 -} \ No newline at end of file + return native.ValidateUTF8Fast(&src) == 0 +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go index ad14b807f..0ae847f75 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -1,6 +1,5 @@ -// +build !appengine -// +build gc -// +build !purego +//go:build !appengine && gc && !purego +// +build !appengine,gc,!purego package xxhash diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a82160..1f52f296e 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,3 +1,4 @@ +//go:build !amd64 || appengine || !gc || purego // +build !amd64 appengine !gc purego package xxhash diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a3..e86f1b5fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e..dfdeaf3ce 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe.
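The xxhash hunks above show the convention applied throughout this diff: since Go 1.17, gofmt keeps a //go:build line (boolean syntax) in sync above the legacy // +build line, and both must express the same constraint. In the legacy form a comma means AND, a space means OR, and stacked // +build lines AND together, which is why the three lines in xxhash_amd64.go collapse into one. As a sketch, a file restricted to amd64 without the purego tag carries the pair:

//go:build amd64 && !purego
// +build amd64,!purego

package xxhash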
diff --git a/vendor/github.com/chenzhuoyu/base64x/base64x.go b/vendor/github.com/chenzhuoyu/base64x/base64x.go index 3d457176b..e756898f6 100644 --- a/vendor/github.com/chenzhuoyu/base64x/base64x.go +++ b/vendor/github.com/chenzhuoyu/base64x/base64x.go @@ -1,7 +1,7 @@ package base64x import ( - `encoding/base64` + "encoding/base64" ) // An Encoding is a radix 64 encoding/decoding scheme, defined by a @@ -12,10 +12,10 @@ import ( type Encoding int const ( - _MODE_URL = 1 << 0 - _MODE_RAW = 1 << 1 - _MODE_AVX2 = 1 << 2 - _MODE_JSON = 1 << 3 + _MODE_URL = 1 << 0 + _MODE_RAW = 1 << 1 + _MODE_AVX2 = 1 << 2 + _MODE_JSON = 1 << 3 ) // StdEncoding is the standard base64 encoding, as defined in @@ -39,10 +39,10 @@ const RawStdEncoding Encoding = _MODE_RAW const RawURLEncoding Encoding = _MODE_RAW | _MODE_URL // JSONStdEncoding is the StdEncoding and encoded as JSON string as RFC 8259. -const JSONStdEncoding Encoding = _MODE_JSON; +const JSONStdEncoding Encoding = _MODE_JSON var ( - archFlags = 0 + archFlags = 0 ) /** Encoder Functions **/ @@ -57,13 +57,13 @@ var ( // If out is not large enough to contain the encoded result, // it will panic. func (self Encoding) Encode(out []byte, src []byte) { - if len(src) != 0 { - if buf := out[:0:len(out)]; self.EncodedLen(len(src)) <= len(out) { - self.EncodeUnsafe(&buf, src) - } else { - panic("encoder output buffer is too small") - } - } + if len(src) != 0 { + if buf := out[:0:len(out)]; self.EncodedLen(len(src)) <= len(out) { + self.EncodeUnsafe(&buf, src) + } else { + panic("encoder output buffer is too small") + } + } } // EncodeUnsafe behaves like Encode, except it does NOT check if @@ -71,27 +71,27 @@ func (self Encoding) Encode(out []byte, src []byte) { // // It will also update the length of out. func (self Encoding) EncodeUnsafe(out *[]byte, src []byte) { - __b64encode(out, &src, int(self) | archFlags) + __b64encode(out, &src, int(self)|archFlags) } // EncodeToString returns the base64 encoding of src. func (self Encoding) EncodeToString(src []byte) string { - nbs := len(src) - ret := make([]byte, 0, self.EncodedLen(nbs)) + nbs := len(src) + ret := make([]byte, 0, self.EncodedLen(nbs)) - /* encode in native code */ - self.EncodeUnsafe(&ret, src) - return mem2str(ret) + /* encode in native code */ + self.EncodeUnsafe(&ret, src) + return mem2str(ret) } // EncodedLen returns the length in bytes of the base64 encoding // of an input buffer of length n. func (self Encoding) EncodedLen(n int) int { - if (self & _MODE_RAW) == 0 { - return (n + 2) / 3 * 4 - } else { - return (n * 8 + 5) / 6 - } + if (self & _MODE_RAW) == 0 { + return (n + 2) / 3 * 4 + } else { + return (n*8 + 5) / 6 + } } /** Decoder Functions **/ @@ -106,13 +106,13 @@ func (self Encoding) EncodedLen(n int) int { // If out is not large enough to contain the encoded result, // it will panic. func (self Encoding) Decode(out []byte, src []byte) (int, error) { - if len(src) == 0 { - return 0, nil - } else if buf := out[:0:len(out)]; self.DecodedLen(len(src)) <= len(out) { - return self.DecodeUnsafe(&buf, src) - } else { - panic("decoder output buffer is too small") - } + if len(src) == 0 { + return 0, nil + } else if buf := out[:0:len(out)]; self.DecodedLen(len(src)) <= len(out) { + return self.DecodeUnsafe(&buf, src) + } else { + panic("decoder output buffer is too small") + } } // DecodeUnsafe behaves like Decode, except it does NOT check if @@ -120,38 +120,38 @@ func (self Encoding) Decode(out []byte, src []byte) (int, error) { // // It will also update the length of out. 
func (self Encoding) DecodeUnsafe(out *[]byte, src []byte) (int, error) { - if n := __b64decode(out, mem2addr(src), len(src), int(self) | archFlags); n >= 0 { - return n, nil - } else { - return 0, base64.CorruptInputError(-n - 1) - } + if n := __b64decode(out, mem2addr(src), len(src), int(self)|archFlags); n >= 0 { + return n, nil + } else { + return 0, base64.CorruptInputError(-n - 1) + } } // DecodeString returns the bytes represented by the base64 string s. func (self Encoding) DecodeString(s string) ([]byte, error) { - src := str2mem(s) - ret := make([]byte, 0, self.DecodedLen(len(s))) - - /* decode into the allocated buffer */ - if _, err := self.DecodeUnsafe(&ret, src); err != nil { - return nil, err - } else { - return ret, nil - } + src := str2mem(s) + ret := make([]byte, 0, self.DecodedLen(len(s))) + + /* decode into the allocated buffer */ + if _, err := self.DecodeUnsafe(&ret, src); err != nil { + return nil, err + } else { + return ret, nil + } } // DecodedLen returns the maximum length in bytes of the decoded data // corresponding to n bytes of base64-encoded data. func (self Encoding) DecodedLen(n int) int { - if (self & _MODE_RAW) == 0 { - return n / 4 * 3 - } else { - return n * 6 / 8 - } + if (self & _MODE_RAW) == 0 { + return n / 4 * 3 + } else { + return n * 6 / 8 + } } func init() { - if hasAVX2() { - archFlags = _MODE_AVX2 - } + if hasAVX2() { + archFlags = _MODE_AVX2 + } } diff --git a/vendor/github.com/chenzhuoyu/base64x/cpuid.go b/vendor/github.com/chenzhuoyu/base64x/cpuid.go index a768c768d..999a63199 100644 --- a/vendor/github.com/chenzhuoyu/base64x/cpuid.go +++ b/vendor/github.com/chenzhuoyu/base64x/cpuid.go @@ -1,17 +1,21 @@ package base64x import ( - `fmt` - `os` + "fmt" + "os" - `github.com/klauspost/cpuid/v2` + "github.com/klauspost/cpuid/v2" ) func hasAVX2() bool { - switch v := os.Getenv("B64X_MODE"); v { - case "" : fallthrough - case "auto" : return cpuid.CPU.Has(cpuid.AVX2) - case "noavx2" : return false - default : panic(fmt.Sprintf("invalid mode: '%s', should be one of 'auto', 'noavx2'", v)) - } -} \ No newline at end of file + switch v := os.Getenv("B64X_MODE"); v { + case "": + fallthrough + case "auto": + return cpuid.CPU.Has(cpuid.AVX2) + case "noavx2": + return false + default: + panic(fmt.Sprintf("invalid mode: '%s', should be one of 'auto', 'noavx2'", v)) + } +} diff --git a/vendor/github.com/chenzhuoyu/base64x/faststr.go b/vendor/github.com/chenzhuoyu/base64x/faststr.go index 83b58ea1f..35a45a48d 100644 --- a/vendor/github.com/chenzhuoyu/base64x/faststr.go +++ b/vendor/github.com/chenzhuoyu/base64x/faststr.go @@ -1,23 +1,23 @@ package base64x import ( - `reflect` - `unsafe` + "reflect" + "unsafe" ) func mem2str(v []byte) (s string) { - (*reflect.StringHeader)(unsafe.Pointer(&s)).Len = (*reflect.SliceHeader)(unsafe.Pointer(&v)).Len - (*reflect.StringHeader)(unsafe.Pointer(&s)).Data = (*reflect.SliceHeader)(unsafe.Pointer(&v)).Data - return + (*reflect.StringHeader)(unsafe.Pointer(&s)).Len = (*reflect.SliceHeader)(unsafe.Pointer(&v)).Len + (*reflect.StringHeader)(unsafe.Pointer(&s)).Data = (*reflect.SliceHeader)(unsafe.Pointer(&v)).Data + return } func str2mem(s string) (v []byte) { - (*reflect.SliceHeader)(unsafe.Pointer(&v)).Cap = (*reflect.StringHeader)(unsafe.Pointer(&s)).Len - (*reflect.SliceHeader)(unsafe.Pointer(&v)).Len = (*reflect.StringHeader)(unsafe.Pointer(&s)).Len - (*reflect.SliceHeader)(unsafe.Pointer(&v)).Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - return + (*reflect.SliceHeader)(unsafe.Pointer(&v)).Cap = 
(*reflect.StringHeader)(unsafe.Pointer(&s)).Len + (*reflect.SliceHeader)(unsafe.Pointer(&v)).Len = (*reflect.StringHeader)(unsafe.Pointer(&s)).Len + (*reflect.SliceHeader)(unsafe.Pointer(&v)).Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + return } func mem2addr(v []byte) unsafe.Pointer { - return *(*unsafe.Pointer)(unsafe.Pointer(&v)) + return *(*unsafe.Pointer)(unsafe.Pointer(&v)) } diff --git a/vendor/github.com/chenzhuoyu/base64x/native_amd64.go b/vendor/github.com/chenzhuoyu/base64x/native_amd64.go index a6f95575a..60602d14b 100644 --- a/vendor/github.com/chenzhuoyu/base64x/native_amd64.go +++ b/vendor/github.com/chenzhuoyu/base64x/native_amd64.go @@ -2,7 +2,7 @@ package base64x import ( - `unsafe` + "unsafe" ) //go:nosplit diff --git a/vendor/github.com/chenzhuoyu/base64x/native_subr_amd64.go b/vendor/github.com/chenzhuoyu/base64x/native_subr_amd64.go index f18de543e..d35ecea7f 100644 --- a/vendor/github.com/chenzhuoyu/base64x/native_subr_amd64.go +++ b/vendor/github.com/chenzhuoyu/base64x/native_subr_amd64.go @@ -1,4 +1,6 @@ +//go:build !noasm || !appengine // +build !noasm !appengine + // Code generated by asm2asm, DO NOT EDIT. package base64x @@ -9,21 +11,21 @@ package base64x func __native_entry__() uintptr var ( - _subr__b64decode = __native_entry__() + 1563 - _subr__b64encode = __native_entry__() + 301 + _subr__b64decode = __native_entry__() + 1563 + _subr__b64encode = __native_entry__() + 301 ) const ( - _stack__b64decode = 128 - _stack__b64encode = 40 + _stack__b64decode = 128 + _stack__b64encode = 40 ) var ( - _ = _subr__b64decode - _ = _subr__b64encode + _ = _subr__b64decode + _ = _subr__b64encode ) const ( - _ = _stack__b64decode - _ = _stack__b64encode + _ = _stack__b64decode + _ = _stack__b64encode ) diff --git a/vendor/github.com/creack/pty/ioctl.go b/vendor/github.com/creack/pty/ioctl.go index 067643795..45ec757a0 100644 --- a/vendor/github.com/creack/pty/ioctl.go +++ b/vendor/github.com/creack/pty/ioctl.go @@ -1,5 +1,5 @@ //go:build !windows && !solaris -//+build !windows,!solaris +// +build !windows,!solaris package pty diff --git a/vendor/github.com/creack/pty/ioctl_bsd.go b/vendor/github.com/creack/pty/ioctl_bsd.go index ab53e2db0..db3bf845b 100644 --- a/vendor/github.com/creack/pty/ioctl_bsd.go +++ b/vendor/github.com/creack/pty/ioctl_bsd.go @@ -1,5 +1,5 @@ -//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) -//+build darwin dragonfly freebsd netbsd openbsd +//go:build darwin || dragonfly || freebsd || netbsd || openbsd +// +build darwin dragonfly freebsd netbsd openbsd package pty diff --git a/vendor/github.com/creack/pty/ioctl_solaris.go b/vendor/github.com/creack/pty/ioctl_solaris.go index 8b6cc0ec0..bff22dad0 100644 --- a/vendor/github.com/creack/pty/ioctl_solaris.go +++ b/vendor/github.com/creack/pty/ioctl_solaris.go @@ -1,5 +1,5 @@ //go:build solaris -//+build solaris +// +build solaris package pty diff --git a/vendor/github.com/creack/pty/pty_darwin.go b/vendor/github.com/creack/pty/pty_darwin.go index cca0971f1..9bdd71d08 100644 --- a/vendor/github.com/creack/pty/pty_darwin.go +++ b/vendor/github.com/creack/pty/pty_darwin.go @@ -1,5 +1,5 @@ //go:build darwin -//+build darwin +// +build darwin package pty diff --git a/vendor/github.com/creack/pty/pty_dragonfly.go b/vendor/github.com/creack/pty/pty_dragonfly.go index 7a1fec3a9..aa916aadf 100644 --- a/vendor/github.com/creack/pty/pty_dragonfly.go +++ b/vendor/github.com/creack/pty/pty_dragonfly.go @@ -1,5 +1,5 @@ //go:build dragonfly -//+build dragonfly +// +build dragonfly package pty 
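The creack/pty hunks here and below all make the same one-character repair: a legacy build constraint only takes effect when written exactly as "// +build" (with a space after the slashes) and followed by a blank line before the package clause; "//+build" is an ordinary comment that the toolchain silently ignores. The corrected header therefore reads, for example:

//go:build freebsd
// +build freebsd

package pty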
diff --git a/vendor/github.com/creack/pty/pty_freebsd.go b/vendor/github.com/creack/pty/pty_freebsd.go index a4cfd925c..bcd3b6f90 100644 --- a/vendor/github.com/creack/pty/pty_freebsd.go +++ b/vendor/github.com/creack/pty/pty_freebsd.go @@ -1,5 +1,5 @@ //go:build freebsd -//+build freebsd +// +build freebsd package pty diff --git a/vendor/github.com/creack/pty/pty_linux.go b/vendor/github.com/creack/pty/pty_linux.go index 22ccbe128..a3b368f56 100644 --- a/vendor/github.com/creack/pty/pty_linux.go +++ b/vendor/github.com/creack/pty/pty_linux.go @@ -1,5 +1,5 @@ //go:build linux -//+build linux +// +build linux package pty diff --git a/vendor/github.com/creack/pty/pty_netbsd.go b/vendor/github.com/creack/pty/pty_netbsd.go index 98c089c8c..2b20d944c 100644 --- a/vendor/github.com/creack/pty/pty_netbsd.go +++ b/vendor/github.com/creack/pty/pty_netbsd.go @@ -1,5 +1,5 @@ //go:build netbsd -//+build netbsd +// +build netbsd package pty diff --git a/vendor/github.com/creack/pty/pty_openbsd.go b/vendor/github.com/creack/pty/pty_openbsd.go index d72b9d8d8..031367a85 100644 --- a/vendor/github.com/creack/pty/pty_openbsd.go +++ b/vendor/github.com/creack/pty/pty_openbsd.go @@ -1,5 +1,5 @@ //go:build openbsd -//+build openbsd +// +build openbsd package pty diff --git a/vendor/github.com/creack/pty/pty_solaris.go b/vendor/github.com/creack/pty/pty_solaris.go index 17e47461f..37f933e60 100644 --- a/vendor/github.com/creack/pty/pty_solaris.go +++ b/vendor/github.com/creack/pty/pty_solaris.go @@ -1,5 +1,5 @@ //go:build solaris -//+build solaris +// +build solaris package pty diff --git a/vendor/github.com/creack/pty/pty_unsupported.go b/vendor/github.com/creack/pty/pty_unsupported.go index 765523abc..c771020fa 100644 --- a/vendor/github.com/creack/pty/pty_unsupported.go +++ b/vendor/github.com/creack/pty/pty_unsupported.go @@ -1,5 +1,5 @@ //go:build !linux && !darwin && !freebsd && !dragonfly && !netbsd && !openbsd && !solaris -//+build !linux,!darwin,!freebsd,!dragonfly,!netbsd,!openbsd,!solaris +// +build !linux,!darwin,!freebsd,!dragonfly,!netbsd,!openbsd,!solaris package pty diff --git a/vendor/github.com/creack/pty/run.go b/vendor/github.com/creack/pty/run.go index 160001f9d..3e2b6ec33 100644 --- a/vendor/github.com/creack/pty/run.go +++ b/vendor/github.com/creack/pty/run.go @@ -1,5 +1,5 @@ //go:build !windows -//+build !windows +// +build !windows package pty diff --git a/vendor/github.com/creack/pty/winsize_unix.go b/vendor/github.com/creack/pty/winsize_unix.go index f358e9081..5d99c3dd9 100644 --- a/vendor/github.com/creack/pty/winsize_unix.go +++ b/vendor/github.com/creack/pty/winsize_unix.go @@ -1,5 +1,5 @@ //go:build !windows -//+build !windows +// +build !windows package pty diff --git a/vendor/github.com/creack/pty/winsize_unsupported.go b/vendor/github.com/creack/pty/winsize_unsupported.go index c4bff44e7..f1f1e1b67 100644 --- a/vendor/github.com/creack/pty/winsize_unsupported.go +++ b/vendor/github.com/creack/pty/winsize_unsupported.go @@ -1,5 +1,5 @@ //go:build windows -//+build windows +// +build windows package pty diff --git a/vendor/github.com/creack/pty/ztypes_386.go b/vendor/github.com/creack/pty/ztypes_386.go index 794515b4c..d126f4aa5 100644 --- a/vendor/github.com/creack/pty/ztypes_386.go +++ b/vendor/github.com/creack/pty/ztypes_386.go @@ -1,5 +1,5 @@ //go:build 386 -//+build 386 +// +build 386 // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_amd64.go b/vendor/github.com/creack/pty/ztypes_amd64.go index 
dc6c52528..6c4a7677f 100644 --- a/vendor/github.com/creack/pty/ztypes_amd64.go +++ b/vendor/github.com/creack/pty/ztypes_amd64.go @@ -1,5 +1,5 @@ //go:build amd64 -//+build amd64 +// +build amd64 // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_arm.go b/vendor/github.com/creack/pty/ztypes_arm.go index eac9b1ef7..de6fe160e 100644 --- a/vendor/github.com/creack/pty/ztypes_arm.go +++ b/vendor/github.com/creack/pty/ztypes_arm.go @@ -1,5 +1,5 @@ //go:build arm -//+build arm +// +build arm // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_arm64.go b/vendor/github.com/creack/pty/ztypes_arm64.go index ecb3ddcab..c4f315cac 100644 --- a/vendor/github.com/creack/pty/ztypes_arm64.go +++ b/vendor/github.com/creack/pty/ztypes_arm64.go @@ -1,5 +1,5 @@ //go:build arm64 -//+build arm64 +// +build arm64 // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go b/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go index f4054cb60..183c42147 100644 --- a/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go +++ b/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go @@ -1,5 +1,5 @@ //go:build amd64 && dragonfly -//+build amd64,dragonfly +// +build amd64,dragonfly // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_dragonfly.go diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_386.go b/vendor/github.com/creack/pty/ztypes_freebsd_386.go index 95a20ab3a..d80dbf717 100644 --- a/vendor/github.com/creack/pty/ztypes_freebsd_386.go +++ b/vendor/github.com/creack/pty/ztypes_freebsd_386.go @@ -1,5 +1,5 @@ //go:build 386 && freebsd -//+build 386,freebsd +// +build 386,freebsd // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go b/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go index e03a071c0..bfab4e458 100644 --- a/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go +++ b/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go @@ -1,5 +1,5 @@ //go:build amd64 && freebsd -//+build amd64,freebsd +// +build amd64,freebsd // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_arm.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm.go index 7665bd3ca..3a8aeae37 100644 --- a/vendor/github.com/creack/pty/ztypes_freebsd_arm.go +++ b/vendor/github.com/creack/pty/ztypes_freebsd_arm.go @@ -1,5 +1,5 @@ //go:build arm && freebsd -//+build arm,freebsd +// +build arm,freebsd // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go index 3f95bb8be..a83924918 100644 --- a/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go +++ b/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go @@ -1,5 +1,5 @@ //go:build arm64 && freebsd -//+build arm64,freebsd +// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs types_freebsd.go diff --git a/vendor/github.com/creack/pty/ztypes_loongarchx.go b/vendor/github.com/creack/pty/ztypes_loongarchx.go index 674d2a408..e5142bf32 100644 --- a/vendor/github.com/creack/pty/ztypes_loongarchx.go +++ b/vendor/github.com/creack/pty/ztypes_loongarchx.go @@ -1,6 +1,6 @@ //go:build (loongarch32 || loongarch64) && linux -//+build linux -//+build loongarch32 loongarch64 +// +build loongarch32 loongarch64 +// +build linux // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_mipsx.go b/vendor/github.com/creack/pty/ztypes_mipsx.go index eddad1639..281277977 100644 --- a/vendor/github.com/creack/pty/ztypes_mipsx.go +++ b/vendor/github.com/creack/pty/ztypes_mipsx.go @@ -1,6 +1,6 @@ //go:build (mips || mipsle || mips64 || mips64le) && linux -//+build linux -//+build mips mipsle mips64 mips64le +// +build mips mipsle mips64 mips64le +// +build linux // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go b/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go index 5b32e63eb..2ab7c4559 100644 --- a/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go +++ b/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go @@ -1,6 +1,6 @@ //go:build (386 || amd64 || arm || arm64) && netbsd -//+build netbsd -//+build 386 amd64 arm arm64 +// +build 386 amd64 arm arm64 +// +build netbsd package pty diff --git a/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go b/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go index c9aa3161b..1eb094816 100644 --- a/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go +++ b/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go @@ -1,6 +1,6 @@ //go:build (386 || amd64 || arm || arm64 || mips64) && openbsd -//+build openbsd -//+build 386 amd64 arm arm64 mips64 +// +build 386 amd64 arm arm64 mips64 +// +build openbsd package pty diff --git a/vendor/github.com/creack/pty/ztypes_ppc64.go b/vendor/github.com/creack/pty/ztypes_ppc64.go index 68634439b..bbb3da832 100644 --- a/vendor/github.com/creack/pty/ztypes_ppc64.go +++ b/vendor/github.com/creack/pty/ztypes_ppc64.go @@ -1,5 +1,5 @@ //go:build ppc64 -//+build ppc64 +// +build ppc64 // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_ppc64le.go b/vendor/github.com/creack/pty/ztypes_ppc64le.go index 6b5621b17..8a4fac3e9 100644 --- a/vendor/github.com/creack/pty/ztypes_ppc64le.go +++ b/vendor/github.com/creack/pty/ztypes_ppc64le.go @@ -1,5 +1,5 @@ //go:build ppc64le -//+build ppc64le +// +build ppc64le // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_riscvx.go b/vendor/github.com/creack/pty/ztypes_riscvx.go index 1233e75bc..dc5da9050 100644 --- a/vendor/github.com/creack/pty/ztypes_riscvx.go +++ b/vendor/github.com/creack/pty/ztypes_riscvx.go @@ -1,5 +1,5 @@ //go:build riscv || riscv64 -//+build riscv riscv64 +// +build riscv riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs types.go diff --git a/vendor/github.com/creack/pty/ztypes_s390x.go b/vendor/github.com/creack/pty/ztypes_s390x.go index 02facea63..3433be7ca 100644 --- a/vendor/github.com/creack/pty/ztypes_s390x.go +++ b/vendor/github.com/creack/pty/ztypes_s390x.go @@ -1,5 +1,5 @@ //go:build s390x -//+build s390x +// +build s390x // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types.go diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 792994785..70ddeaad3 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -18,6 +18,7 @@ // tag is deprecated and thus should not be used. // Go versions prior to 1.4 are disabled because they use a different layout // for interfaces which make the implementation of unsafeReflectValue more complex. +//go:build !js && !appengine && !safe && !disableunsafe && go1.4 // +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 205c28d68..5e2d890d6 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,6 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. +//go:build js || appengine || safe || disableunsafe || !go1.4 // +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index 2e3d22f31..161895fc6 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -254,15 +254,15 @@ pointer addresses used to indirect to the final value. It provides the following features over the built-in printing facilities provided by the fmt package: - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output + - Pointers are dereferenced and followed + - Circular data structures are detected and handled properly + - Custom Stringer/error interfaces are optionally invoked, including + on unexported types + - Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + - Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output The configuration options are controlled by modifying the public members of c. See ConfigState for options documentation. @@ -295,12 +295,12 @@ func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) // NewDefaultConfig returns a ConfigState with the following default settings. 
// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false func NewDefaultConfig() *ConfigState { return &ConfigState{Indent: " "} } diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index aacaac6f1..722e9aa79 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -21,35 +21,36 @@ debugging. A quick overview of the additional features spew provides over the built-in printing facilities for Go data types are as follows: - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) + - Pointers are dereferenced and followed + - Circular data structures are detected and handled properly + - Custom Stringer/error interfaces are optionally invoked, including + on unexported types + - Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + - Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) There are two different approaches spew allows for dumping Go data structures: - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt + - Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + - A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt -Quick Start +# Quick Start This section demonstrates how to quickly get started with spew. See the sections below for further details on formatting and configuration options. To dump a variable with full newlines, indentation, type, and pointer information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) spew.Fdump(someWriter, myVar1, myVar2, ...) str := spew.Sdump(myVar1, myVar2, ...) 
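The doc.go rewrites in this hunk and the next track the Go 1.19 doc comment syntax, which gofmt now normalizes: a line beginning with "# " becomes a section heading, lines beginning with "  - " become a bulleted list, and indented lines render as a code block, hence the blank lines inserted before the example snippets. A minimal sketch of the resulting shape:

// # Quick Start
//
// To dump a variable with full detail:
//
//	spew.Dump(myVar1)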
@@ -58,12 +59,13 @@ Alternatively, if you would prefer to use format strings with a compacted inline printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -Configuration Options +# Configuration Options Configuration of spew is handled by fields in the ConfigState type. For convenience, all of the top-level functions use a global state available @@ -74,51 +76,52 @@ equivalent to the top-level functions. This allows concurrent configuration options. See the ConfigState documentation for more details. The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage + + - Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + - MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + - DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + - DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + - DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. 
+ + - DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + - ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + - SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + - SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +# Dump Usage Simply call spew.Dump with a list of variables you want to dump: @@ -133,7 +136,7 @@ A third option is to call spew.Sdump to get the formatted output as a string: str := spew.Sdump(myVar1, myVar2, ...) -Sample Dump Output +# Sample Dump Output See the Dump example for details on the setup of the types and variables being shown here. @@ -150,13 +153,14 @@ shown here. Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C command as shown. + ([]uint8) (len=32 cap=32) { 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| 00000020 31 32 |12| } -Custom Formatter +# Custom Formatter Spew provides a custom formatter that implements the fmt.Formatter interface so that it integrates cleanly with standard fmt package printing functions. The @@ -170,7 +174,7 @@ standard fmt package for formatting. In addition, the custom formatter ignores the width and precision arguments (however they will still work on the format specifiers not handled by the custom formatter). -Custom Formatter Usage +# Custom Formatter Usage The simplest way to make use of the spew custom formatter is to call one of the convenience functions such as spew.Printf, spew.Println, or spew.Printf. The @@ -184,15 +188,17 @@ functions have syntax you are most likely already familiar with: See the Index for the full list convenience functions. -Sample Formatter Output +# Sample Formatter Output Double pointer to a uint8: + %v: <**>5 %+v: <**>(0xf8400420d0->0xf8400420c8)5 %#v: (**uint8)5 %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*><shown>} %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} @@ -201,7 +207,7 @@ Pointer to circular struct with a uint8 field and a pointer to itself: See the Printf example for details on the setup of variables being shown here. -Errors +# Errors Since it is possible for custom Stringer/error interfaces to panic, spew detects them and handles them internally by printing the panic information diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index f78d89fc1..8323041a4 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -488,15 +488,15 @@ pointer addresses used to indirect to the final value. 
It provides the following features over the built-in printing facilities provided by the fmt package: - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output + - Pointers are dereferenced and followed + - Circular data structures are detected and handled properly + - Custom Stringer/error interfaces are optionally invoked, including + on unexported types + - Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + - Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output The configuration options are controlled by an exported package global, spew.Config. See ConfigState for options documentation. diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 04541de78..936fe9da4 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -5,106 +5,105 @@ that suits you. Use simple and default helper functions with predefined foreground colors: - color.Cyan("Prints text in cyan.") + color.Cyan("Prints text in cyan.") - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") However there are times where custom color mixes are required. Below are some examples to create custom color objects and use the print functions of each separate color object. - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") You can create PrintXxx functions to simplify even more: - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") You can also FprintXxx functions to pass your own io.Writer: - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") Or create SprintXxx functions to mix strings with other non-colorized strings: - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) Windows support is enabled by default. All Print functions work as intended. However only for color.SprintXXX functions, user should use fmt.FprintXXX and set the output to color.Output: - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) Using with existing code is possible. Just use the Set() method to set the standard output to the given parameters. That way a rewrite of an existing code is not required. - // Use handy standard colors. - color.Set(color.FgYellow) + // Use handy standard colors. 
+ color.Set(color.FgYellow) - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") - color.Unset() // don't forget to unset + color.Unset() // don't forget to unset - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function - fmt.Println("All text will be now bold magenta.") + fmt.Println("All text will be now bold magenta.") There might be a case where you want to disable color output (for example to pipe the standard output of your app to somewhere else). `Color` has support to @@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: - var flagNoColor = flag.Bool("no-color", false, "Disable color output") + var flagNoColor = flag.Bool("no-color", false, "Disable color output") - if *flagNoColor { - color.NoColor = true // disables colorized output - } + if *flagNoColor { + color.NoColor = true // disables colorized output + } You can also disable the color by setting the NO_COLOR environment variable to any value. It also has support for single color definitions (local). You can disable/enable color output on the fly: - c := color.New(color.FgCyan) - c.Println("Prints cyan text") + c := color.New(color.FgCyan) + c.Println("Prints cyan text") - c.DisableColor() - c.Println("This is printed without any color") + c.DisableColor() + c.Println("This is printed without any color") - c.EnableColor() - c.Println("This prints again cyan...") + c.EnableColor() + c.Println("This prints again cyan...") */ package color diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go index 29bdded3e..9ac9c0dbb 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -153,7 +153,9 @@ func Marc(raw []byte, limit uint32) bool { // GLB is the binary file format representation of 3D models save in // the GL transmission Format (glTF). // see more: https://docs.fileformat.com/3d/glb/ -// https://www.iana.org/assignments/media-types/model/gltf-binary +// +// https://www.iana.org/assignments/media-types/model/gltf-binary +// // GLB file format is based on little endian and its header structure // show below: // diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go index 466058fbe..34b84f401 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go @@ -177,7 +177,9 @@ func newXMLSig(localName, xmlns string) xmlSig { // and, optionally, followed by the arguments for the interpreter. // // Ex: -// #! /usr/bin/env php +// +// #! /usr/bin/env php +// // /usr/bin/env is the interpreter, php is the first and only argument. 
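As a rough sketch of the shebang-matching idea documented above (an illustration only — hasShebang is a hypothetical helper, not the vendored Detector, and its whitespace handling is a simplifying assumption):

	package main

	import (
		"bytes"
		"fmt"
	)

	// hasShebang reports whether raw begins with "#!" followed, after
	// optional spaces or tabs, by the given interpreter path.
	func hasShebang(raw, interpreter []byte) bool {
		if !bytes.HasPrefix(raw, []byte("#!")) {
			return false
		}
		rest := bytes.TrimLeft(raw[2:], " \t")
		return bytes.HasPrefix(rest, interpreter)
	}

	func main() {
		fmt.Println(hasShebang([]byte("#! /usr/bin/env php\n"), []byte("/usr/bin/env"))) // true
	}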
func shebang(sigs ...[]byte) Detector { return func(raw []byte, limit uint32) bool { diff --git a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go index 08e68e334..1b5909b75 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go +++ b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go @@ -39,7 +39,8 @@ func Detect(in []byte) *MIME { // // DetectReader assumes the reader offset is at the start. If the input is an // io.ReadSeeker you previously read from, it should be rewinded before detection: -// reader.Seek(0, io.SeekStart) +// +// reader.Seek(0, io.SeekStart) func DetectReader(r io.Reader) (*MIME, error) { var in []byte var err error diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go index d2e5354dc..86d0ca7bb 100644 --- a/vendor/github.com/go-chi/chi/v5/chi.go +++ b/vendor/github.com/go-chi/chi/v5/chi.go @@ -1,29 +1,29 @@ -// // Package chi is a small, idiomatic and composable router for building HTTP services. // // chi requires Go 1.10 or newer. // // Example: -// package main // -// import ( -// "net/http" +// package main +// +// import ( +// "net/http" // -// "github.com/go-chi/chi/v5" -// "github.com/go-chi/chi/v5/middleware" -// ) +// "github.com/go-chi/chi/v5" +// "github.com/go-chi/chi/v5/middleware" +// ) // -// func main() { -// r := chi.NewRouter() -// r.Use(middleware.Logger) -// r.Use(middleware.Recoverer) +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) // -// r.Get("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("root.")) -// }) +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) // -// http.ListenAndServe(":3333", r) -// } +// http.ListenAndServe(":3333", r) +// } // // See github.com/go-chi/chi/_examples/ for more in-depth examples. // @@ -47,12 +47,12 @@ // placeholder which will match / characters. 
// // Examples: -// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" -// "/user/{name}/info" matches "/user/jsmith/info" -// "/page/*" matches "/page/intro/latest" -// "/page/*/index" also matches "/page/intro/latest" -// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" // +// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" +// "/user/{name}/info" matches "/user/jsmith/info" +// "/page/*" matches "/page/intro/latest" +// "/page/*/index" also matches "/page/intro/latest" +// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" package chi import "net/http" diff --git a/vendor/github.com/go-chi/chi/v5/context.go b/vendor/github.com/go-chi/chi/v5/context.go index e78a2385d..26960d731 100644 --- a/vendor/github.com/go-chi/chi/v5/context.go +++ b/vendor/github.com/go-chi/chi/v5/context.go @@ -112,13 +112,13 @@ func (x *Context) URLParam(key string) string { // // For example, // -// func Instrument(next http.Handler) http.Handler { -// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// next.ServeHTTP(w, r) -// routePattern := chi.RouteContext(r.Context()).RoutePattern() -// measure(w, r, routePattern) -// }) -// } +// func Instrument(next http.Handler) http.Handler { +// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// next.ServeHTTP(w, r) +// routePattern := chi.RouteContext(r.Context()).RoutePattern() +// measure(w, r, routePattern) +// }) +// } func (x *Context) RoutePattern() string { routePattern := strings.Join(x.RoutePatterns, "") routePattern = replaceWildcards(routePattern) diff --git a/vendor/github.com/go-chi/chi/v5/middleware/compress.go b/vendor/github.com/go-chi/chi/v5/middleware/compress.go index 52f4e0b53..69bd2192e 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/compress.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/compress.go @@ -137,14 +137,14 @@ func NewCompressor(level int, types ...string) *Compressor { // // For example, add the Brotli algortithm: // -// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc" +// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc" // -// compressor := middleware.NewCompressor(5, "text/html") -// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer { -// params := brotli_enc.NewBrotliParams() -// params.SetQuality(level) -// return brotli_enc.NewBrotliWriter(params, w) -// }) +// compressor := middleware.NewCompressor(5, "text/html") +// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer { +// params := brotli_enc.NewBrotliParams() +// params.SetQuality(level) +// return brotli_enc.NewBrotliWriter(params, w) +// }) func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) { encoding = strings.ToLower(encoding) if encoding == "" { diff --git a/vendor/github.com/go-chi/chi/v5/middleware/nocache.go b/vendor/github.com/go-chi/chi/v5/middleware/nocache.go index 2412829e1..dcfb1f764 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/nocache.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/nocache.go @@ -32,10 +32,11 @@ var etagHeaders = []string{ // a router (or subrouter) from being cached by an upstream proxy and/or client. 
// // As per http://wiki.nginx.org/HttpProxyModule - NoCache sets: -// Expires: Thu, 01 Jan 1970 00:00:00 UTC -// Cache-Control: no-cache, private, max-age=0 -// X-Accel-Expires: 0 -// Pragma: no-cache (for HTTP/1.0 proxies/clients) +// +// Expires: Thu, 01 Jan 1970 00:00:00 UTC +// Cache-Control: no-cache, private, max-age=0 +// X-Accel-Expires: 0 +// Pragma: no-cache (for HTTP/1.0 proxies/clients) func NoCache(h http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/github.com/go-chi/chi/v5/middleware/profiler.go b/vendor/github.com/go-chi/chi/v5/middleware/profiler.go index 3c36f878f..c1d8260f0 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/profiler.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/profiler.go @@ -11,13 +11,13 @@ import ( // Profiler is a convenient subrouter used for mounting net/http/pprof. ie. // -// func MyService() http.Handler { -// r := chi.NewRouter() -// // ..middlewares -// r.Mount("/debug", middleware.Profiler()) -// // ..routes -// return r -// } +// func MyService() http.Handler { +// r := chi.NewRouter() +// // ..middlewares +// r.Mount("/debug", middleware.Profiler()) +// // ..routes +// return r +// } func Profiler() http.Handler { r := chi.NewRouter() r.Use(NoCache) diff --git a/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go index ea914a1d3..fc8da149d 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go @@ -15,14 +15,14 @@ import ( // rSubdomain := chi.NewRouter() // // r.Use(middleware.RouteHeaders(). -// Route("Host", "example.com", middleware.New(r)). -// Route("Host", "*.example.com", middleware.New(rSubdomain)). -// Handler) +// +// Route("Host", "example.com", middleware.New(r)). +// Route("Host", "*.example.com", middleware.New(rSubdomain)). +// Handler) // // r.Get("/", h) // rSubdomain.Get("/", h2) // -// // Another example, imagine you want to setup multiple CORS handlers, where for // your origin servers you allow authorized requests, but for third-party public // requests, authorization is disabled. @@ -30,20 +30,20 @@ import ( // r := chi.NewRouter() // // r.Use(middleware.RouteHeaders(). -// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{ -// AllowedOrigins: []string{"https://api.skyweaver.net"}, -// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, -// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"}, -// AllowCredentials: true, // <----------<<< allow credentials -// })). -// Route("Origin", "*", cors.Handler(cors.Options{ -// AllowedOrigins: []string{"*"}, -// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, -// AllowedHeaders: []string{"Accept", "Content-Type"}, -// AllowCredentials: false, // <----------<<< do not allow credentials -// })). -// Handler) // +// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{ +// AllowedOrigins: []string{"https://api.skyweaver.net"}, +// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, +// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"}, +// AllowCredentials: true, // <----------<<< allow credentials +// })). 
+// Route("Origin", "*", cors.Handler(cors.Options{ +// AllowedOrigins: []string{"*"}, +// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, +// AllowedHeaders: []string{"Accept", "Content-Type"}, +// AllowCredentials: false, // <----------<<< do not allow credentials +// })). +// Handler) func RouteHeaders() HeaderRouter { return HeaderRouter{} } diff --git a/vendor/github.com/go-chi/chi/v5/middleware/timeout.go b/vendor/github.com/go-chi/chi/v5/middleware/timeout.go index 8e373536c..26dc54916 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/timeout.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/timeout.go @@ -15,21 +15,20 @@ import ( // // ie. a route/handler may look like: // -// r.Get("/long", func(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// processTime := time.Duration(rand.Intn(4)+1) * time.Second +// r.Get("/long", func(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// processTime := time.Duration(rand.Intn(4)+1) * time.Second // -// select { -// case <-ctx.Done(): -// return +// select { +// case <-ctx.Done(): +// return // -// case <-time.After(processTime): -// // The above channel simulates some hard work. -// } -// -// w.Write([]byte("done")) -// }) +// case <-time.After(processTime): +// // The above channel simulates some hard work. +// } // +// w.Write([]byte("done")) +// }) func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { return func(next http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/github.com/go-chi/chi/v5/middleware/url_format.go b/vendor/github.com/go-chi/chi/v5/middleware/url_format.go index 10d7134dc..5f92f17f4 100644 --- a/vendor/github.com/go-chi/chi/v5/middleware/url_format.go +++ b/vendor/github.com/go-chi/chi/v5/middleware/url_format.go @@ -22,28 +22,27 @@ var ( // // Sample usage.. for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml` // -// func routes() http.Handler { -// r := chi.NewRouter() -// r.Use(middleware.URLFormat) +// func routes() http.Handler { +// r := chi.NewRouter() +// r.Use(middleware.URLFormat) // -// r.Get("/articles/{id}", ListArticles) +// r.Get("/articles/{id}", ListArticles) // -// return r -// } +// return r +// } // -// func ListArticles(w http.ResponseWriter, r *http.Request) { -// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string) -// -// switch urlFormat { -// case "json": -// render.JSON(w, r, articles) -// case "xml:" -// render.XML(w, r, articles) -// default: -// render.JSON(w, r, articles) -// } -// } +// func ListArticles(w http.ResponseWriter, r *http.Request) { +// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string) // +// switch urlFormat { +// case "json": +// render.JSON(w, r, articles) +// case "xml:" +// render.XML(w, r, articles) +// default: +// render.JSON(w, r, articles) +// } +// } func URLFormat(next http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go index bbfe089df..512d0ee5e 100644 --- a/vendor/github.com/go-redis/redis/v8/commands.go +++ b/vendor/github.com/go-redis/redis/v8/commands.go @@ -13,7 +13,7 @@ import ( // otherwise you will receive an error: (error) ERR syntax error. 
// For example: // -// rdb.Set(ctx, key, value, redis.KeepTTL) +// rdb.Set(ctx, key, value, redis.KeepTTL) const KeepTTL = -1 func usePrecise(dur time.Duration) bool { @@ -2049,8 +2049,10 @@ func xClaimArgs(a *XClaimArgs) []interface{} { // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: -// XTRIM key MAXLEN/MINID threshold LIMIT limit. -// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// // The redis-server version is lower than 6.2, please set limit to 0. func (c cmdable) xTrim( ctx context.Context, key, strategy string, @@ -2298,6 +2300,7 @@ func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd // ZAddCh Redis `ZADD key CH score member [score member ...]` command. // Deprecated: Use +// // client.ZAddArgs(ctx, ZAddArgs{ // Ch: true, // Members: []Z, @@ -2311,6 +2314,7 @@ func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd // ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. // Deprecated: Use +// // client.ZAddArgs(ctx, ZAddArgs{ // NX: true, // Ch: true, @@ -2326,6 +2330,7 @@ func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCm // ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. // Deprecated: Use +// // client.ZAddArgs(ctx, ZAddArgs{ // XX: true, // Ch: true, @@ -2341,6 +2346,7 @@ func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCm // ZIncr Redis `ZADD key INCR score member` command. // Deprecated: Use +// // client.ZAddArgsIncr(ctx, ZAddArgs{ // Members: []Z, // }) @@ -2353,6 +2359,7 @@ func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { // ZIncrNX Redis `ZADD key NX INCR score member` command. // Deprecated: Use +// // client.ZAddArgsIncr(ctx, ZAddArgs{ // NX: true, // Members: []Z, @@ -2367,6 +2374,7 @@ func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { // ZIncrXX Redis `ZADD key XX INCR score member` command. // Deprecated: Use +// // client.ZAddArgsIncr(ctx, ZAddArgs{ // XX: true, // Members: []Z, @@ -2488,11 +2496,13 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic // ZRangeArgs is all the options of the ZRange command. // In version> 6.2.0, you can replace the(cmd): -// ZREVRANGE, -// ZRANGEBYSCORE, -// ZREVRANGEBYSCORE, -// ZRANGEBYLEX, -// ZREVRANGEBYLEX. +// +// ZREVRANGE, +// ZRANGEBYSCORE, +// ZREVRANGEBYSCORE, +// ZRANGEBYLEX, +// ZREVRANGEBYLEX. +// // Please pay attention to your redis-server version. // // Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. @@ -2897,7 +2907,7 @@ func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { // ClientKillByFilter is new style syntax, while the ClientKill is old // -// CLIENT KILL <option> [value] ... <option> [value] +// CLIENT KILL <option> [value] ... 
<option> [value] func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "client" diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go index 64f46272a..b81244fd6 100644 --- a/vendor/github.com/go-redis/redis/v8/internal/once.go +++ b/vendor/github.com/go-redis/redis/v8/internal/once.go @@ -32,7 +32,9 @@ type Once struct { // Do calls the function f if and only if Do has not been invoked // without error for this instance of Once. In other words, given -// var once Once +// +// var once Once +// // if once.Do(f) is called multiple times, only the first call will // invoke f, even if f has a different value in each invocation unless // f returns an error. A new instance of Once is required for each @@ -41,7 +43,8 @@ type Once struct { // Do is intended for initialization that must be run exactly once. Since f // is niladic, it may be necessary to use a function literal to capture the // arguments to a function to be invoked by Do: -// err := config.once.Do(func() error { return config.init(filename) }) +// +// err := config.once.Do(func() error { return config.init(filename) }) func (o *Once) Do(f func() error) error { if atomic.LoadUint32(&o.done) == 1 { return nil diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go index 0e994765f..03733c000 100644 --- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go @@ -10,6 +10,7 @@ import ( ) // Scan parses bytes `b` to `v` with appropriate type. +// //nolint:gocyclo func Scan(b []byte, v interface{}) error { switch v := v.(type) { diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go index a4abe32c3..22b7e3b9f 100644 --- a/vendor/github.com/go-redis/redis/v8/options.go +++ b/vendor/github.com/go-redis/redis/v8/options.go @@ -193,32 +193,38 @@ func (opt *Options) clone() *Options { // Scheme is required. // There are two connection types: by tcp socket and by unix socket. // Tcp connection: -// redis://<user>:<password>@<host>:<port>/<db_number> +// +// redis://<user>:<password>@<host>:<port>/<db_number> +// // Unix connection: -// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number> +// +// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number> +// // Most Option fields can be set using query parameters, with the following restrictions: -// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries -// - only scalar type fields are supported (bool, int, time.Duration) -// - for time.Duration fields, values must be a valid input for time.ParseDuration(); -// additionally a plain integer as value (i.e. 
without unit) is intepreted as seconds -// - to disable a duration field, use value less than or equal to 0; to use the default -// value, leave the value blank or remove the parameter -// - only the last value is interpreted if a parameter is given multiple times -// - fields "network", "addr", "username" and "password" can only be set using other -// URL attributes (scheme, host, userinfo, resp.), query paremeters using these -// names will be treated as unknown parameters -// - unknown parameter names will result in an error +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. without unit) is intepreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "username" and "password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// // Examples: -// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2 -// is equivalent to: -// &Options{ -// Network: "tcp", -// Addr: "localhost:6789", -// DB: 1, // path "/3" was overridden by "&db=1" -// DialTimeout: 3 * time.Second, // no time unit = seconds -// ReadTimeout: 6 * time.Second, -// MaxRetries: 2, -// } +// +// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2 +// is equivalent to: +// &Options{ +// Network: "tcp", +// Addr: "localhost:6789", +// DB: 1, // path "/3" was overridden by "&db=1" +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// MaxRetries: 2, +// } func ParseURL(redisURL string) (*Options, error) { u, err := url.Parse(redisURL) if err != nil { diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go index bcf8a2a94..f6b2893be 100644 --- a/vendor/github.com/go-redis/redis/v8/redis.go +++ b/vendor/github.com/go-redis/redis/v8/redis.go @@ -663,26 +663,26 @@ func (c *Client) pubSub() *PubSub { // subscription may not be active immediately. To force the connection to wait, // you may call the Receive() method on the returned *PubSub like so: // -// sub := client.Subscribe(queryResp) -// iface, err := sub.Receive() -// if err != nil { -// // handle error -// } +// sub := client.Subscribe(queryResp) +// iface, err := sub.Receive() +// if err != nil { +// // handle error +// } // -// // Should be *Subscription, but others are possible if other actions have been -// // taken on sub since it was created. -// switch iface.(type) { -// case *Subscription: -// // subscribe succeeded -// case *Message: -// // received first message -// case *Pong: -// // pong received -// default: -// // handle error -// } +// // Should be *Subscription, but others are possible if other actions have been +// // taken on sub since it was created. 
+// switch iface.(type) { +// case *Subscription: +// // subscribe succeeded +// case *Message: +// // received first message +// case *Pong: +// // pong received +// default: +// // handle error +// } // -// ch := sub.Channel() +// ch := sub.Channel() func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go index 413cb20bf..fb18065a2 100644 --- a/vendor/github.com/goccy/go-json/json.go +++ b/vendor/github.com/goccy/go-json/json.go @@ -89,31 +89,31 @@ type UnmarshalerContext interface { // // Examples of struct field tags and their meanings: // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "-". -// Field int `json:"-,"` +// // Field appears in JSON as key "-". +// Field int `json:"-,"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, and ASCII punctuation except quotation @@ -166,7 +166,6 @@ type UnmarshalerContext interface { // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. -// func Marshal(v interface{}) ([]byte, error) { return MarshalWithOption(v) } @@ -264,14 +263,13 @@ func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...E // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. 
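The struct-tag and JSON-null rules described in the comments above behave the same way in goccy/go-json as in the standard encoding/json package; a small sketch (the item type and field names are illustrative):

	package main

	import (
		"fmt"

		json "github.com/goccy/go-json"
	)

	type item struct {
		Name string `json:"myName"`     // key renamed in JSON
		N    int    `json:",omitempty"` // key "N", omitted when zero
		Skip int    `json:"-"`          // never encoded
	}

	func main() {
		b, _ := json.Marshal(item{Name: "a", Skip: 9})
		fmt.Println(string(b)) // {"myName":"a"}

		var p *item
		_ = json.Unmarshal([]byte("null"), &p)
		fmt.Println(p == nil) // true: JSON null leaves the pointer nil
	}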
-// func Unmarshal(data []byte, v interface{}) error { return unmarshal(data, v) } @@ -299,7 +297,6 @@ func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token = json.Token // A Number represents a JSON number literal. diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4ba..06a91f086 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5e..2d2adac9e 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !go1.4 +//go:build windows && !go1.4 +// +build windows,!go1.4 package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go index 9a28e57c3..c78a98fdc 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -1,5 +1,5 @@ -// +build windows -// +build go1.4 +//go:build windows && go1.4 +// +build windows,go1.4 package mousetrap diff --git a/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go b/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go index f06625ee3..a673dd861 100644 --- a/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go +++ b/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go @@ -6,27 +6,23 @@ // // 1. Insert in cmd/root.go file of your project : // -// import cc "github.com/ivanpirog/coloredcobra" -// +// import cc "github.com/ivanpirog/coloredcobra" // // 2. Put the following code to the beginning of the Execute() function: // -// cc.Init(&cc.Config{ -// RootCmd: rootCmd, -// Headings: cc.Bold + cc.Underline, -// Commands: cc.Yellow + cc.Bold, -// ExecName: cc.Bold, -// Flags: cc.Bold, -// }) -// +// cc.Init(&cc.Config{ +// RootCmd: rootCmd, +// Headings: cc.Bold + cc.Underline, +// Commands: cc.Yellow + cc.Bold, +// ExecName: cc.Bold, +// Flags: cc.Bold, +// }) // // 3. Build & execute your code. // -// // Copyright © 2022 Ivan Pirog <ivan.pirog@gmail.com>. // Released under the MIT license. 
// Project home: https://github.com/ivanpirog/coloredcobra -// package coloredcobra import ( @@ -43,16 +39,16 @@ import ( // // Example: // -// c := &cc.Config{ -// RootCmd: rootCmd, -// Headings: cc.HiWhite + cc.Bold + cc.Underline, -// Commands: cc.Yellow + cc.Bold, -// CmdShortDescr: cc.Cyan, -// ExecName: cc.Bold, -// Flags: cc.Bold, -// Aliases: cc.Bold, -// Example: cc.Italic, -// } +// c := &cc.Config{ +// RootCmd: rootCmd, +// Headings: cc.HiWhite + cc.Bold + cc.Underline, +// Commands: cc.Yellow + cc.Bold, +// CmdShortDescr: cc.Cyan, +// ExecName: cc.Bold, +// Flags: cc.Bold, +// Aliases: cc.Bold, +// Example: cc.Italic, +// } type Config struct { RootCmd *cobra.Command Headings uint8 diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go index f6b8aeab0..4b7e1cf5c 100644 --- a/vendor/github.com/json-iterator/go/any.go +++ b/vendor/github.com/json-iterator/go/any.go @@ -3,11 +3,12 @@ package jsoniter import ( "errors" "fmt" - "github.com/modern-go/reflect2" "io" "reflect" "strconv" "unsafe" + + "github.com/modern-go/reflect2" ) // Any generic object representation. diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go index 8a3d8b6fb..caf16feec 100644 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -66,7 +66,7 @@ func (iter *Iterator) ReadBigInt() (ret *big.Int) { return ret } -//ReadFloat32 read float32 +// ReadFloat32 read float32 func (iter *Iterator) ReadFloat32() (ret float32) { c := iter.nextToken() if c == '-' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 9303de41e..3d993f277 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -1,4 +1,5 @@ -//+build jsoniter_sloppy +//go:build jsoniter_sloppy +// +build jsoniter_sloppy package jsoniter diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go index 6cf66d043..f1ad6591b 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -1,4 +1,5 @@ -//+build !jsoniter_sloppy +//go:build !jsoniter_sloppy +// +build !jsoniter_sloppy package jsoniter diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go index 13a0b7b08..7eb5b1dc9 100644 --- a/vendor/github.com/json-iterator/go/reflect_array.go +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -2,9 +2,10 @@ package jsoniter import ( "fmt" - "github.com/modern-go/reflect2" "io" "unsafe" + + "github.com/modern-go/reflect2" ) func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go index 8b6bc8b43..71a0fe273 100644 --- a/vendor/github.com/json-iterator/go/reflect_dynamic.go +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -1,9 +1,10 @@ package jsoniter import ( - "github.com/modern-go/reflect2" "reflect" "unsafe" + + "github.com/modern-go/reflect2" ) type dynamicEncoder struct { diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 74a97bfe5..a820f10ca 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ 
b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -2,12 +2,13 @@ package jsoniter import ( "fmt" - "github.com/modern-go/reflect2" "reflect" "sort" "strings" "unicode" "unsafe" + + "github.com/modern-go/reflect2" ) var typeDecoders = map[string]ValDecoder{} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go index 98d45c1ec..52e11bf3f 100644 --- a/vendor/github.com/json-iterator/go/reflect_json_number.go +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -2,9 +2,10 @@ package jsoniter import ( "encoding/json" - "github.com/modern-go/reflect2" "strconv" "unsafe" + + "github.com/modern-go/reflect2" ) type Number string diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go index eba434f2f..521e38a89 100644 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -2,8 +2,9 @@ package jsoniter import ( "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 582967130..696194bde 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -2,11 +2,12 @@ package jsoniter import ( "fmt" - "github.com/modern-go/reflect2" "io" "reflect" "sort" "unsafe" + + "github.com/modern-go/reflect2" ) func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go index fa71f4748..112c110a2 100644 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -1,8 +1,9 @@ package jsoniter import ( - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go index 9441d79df..f363a7169 100644 --- a/vendor/github.com/json-iterator/go/reflect_slice.go +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -2,9 +2,10 @@ package jsoniter import ( "fmt" - "github.com/modern-go/reflect2" "io" "unsafe" + + "github.com/modern-go/reflect2" ) func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go index 152e3ef5a..edf77bf59 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -2,10 +2,11 @@ package jsoniter import ( "fmt" - "github.com/modern-go/reflect2" "io" "reflect" "unsafe" + + "github.com/modern-go/reflect2" ) func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index 71c75a065..bb36351a5 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -7,19 +7,19 @@ package flate // dictDecoder implements the LZ77 sliding 
dictionary as used in decompression. // LZ77 decompresses data through sequences of two forms of commands: // -// * Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. // -// * Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. // // For performance reasons, this implementation performs little to no sanity // checks about the arguments. As such, the invariants documented for each diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go index b26d19ec2..25ba3c714 100644 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate //go:generate go run $GOFILE && gofmt -w inflate_gen.go diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 208d66711..5f7a06f05 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -244,9 +244,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) { // Codes 0-15 are single byte codes. Codes 16-18 are followed by additional // information. 
Code badCode is an end marker // -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { for i := range w.codegenFreq { w.codegenFreq[i] = 0 @@ -440,9 +440,9 @@ func (w *huffmanBitWriter) writeOutBits() { // Write the header of a dynamic Huffman block to the output stream. // -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { if w.err != nil { return diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 4c39a3018..f55a2c8c0 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -128,13 +128,18 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { // The cases of 0, 1, and 2 literals are handled by special case code. // // list An array of the literals with non-zero frequencies -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 +// +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// // maxBits The maximum number of bits that should be used to encode any literal. -// Must be less than 16. +// +// Must be less than 16. +// // return An integer array in which array[i] indicates the number of literals -// that should be encoded in i bits. +// +// that should be encoded in i bits. func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { if maxBits >= maxBitsLimit { panic("flate: maxBits too large") diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go index f477a5d6e..1b7a2cbd7 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -1,4 +1,5 @@ -//+build !amd64 +//go:build !amd64 +// +build !amd64 package flate diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go index d51a6c915..641538684 100644 --- a/vendor/github.com/leodido/go-urn/urn.go +++ b/vendor/github.com/leodido/go-urn/urn.go @@ -83,4 +83,4 @@ func (u *URN) UnmarshalJSON(bytes []byte) error { *u = *value } return nil -} \ No newline at end of file +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index 8e3c99171..367adab99 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -42,7 +42,8 @@ func IsTerminal(fd uintptr) bool { // Check pipe name is used for cygwin/msys2 pty. 
// Cygwin/MSYS2 PTY has a name like: -// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +// +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master func isCygwinPipeName(name string) bool { token := strings.Split(name, "-") if len(token) < 5 { diff --git a/vendor/github.com/mgutz/ansi/doc.go b/vendor/github.com/mgutz/ansi/doc.go index c93039b85..ded7b2b17 100644 --- a/vendor/github.com/mgutz/ansi/doc.go +++ b/vendor/github.com/mgutz/ansi/doc.go @@ -3,8 +3,8 @@ Package ansi is a small, fast library to create ANSI colored strings and codes. Installation - # this installs the color viewer and the package - go get -u github.com/mgutz/ansi/cmd/ansi-mgutz + # this installs the color viewer and the package + go get -u github.com/mgutz/ansi/cmd/ansi-mgutz Example diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go index aeabf8c4f..7db701945 100644 --- a/vendor/github.com/modern-go/concurrent/go_above_19.go +++ b/vendor/github.com/modern-go/concurrent/go_above_19.go @@ -1,4 +1,5 @@ -//+build go1.9 +//go:build go1.9 +// +build go1.9 package concurrent diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go index b9c8df7f4..64544f5b3 100644 --- a/vendor/github.com/modern-go/concurrent/go_below_19.go +++ b/vendor/github.com/modern-go/concurrent/go_below_19.go @@ -1,4 +1,5 @@ -//+build !go1.9 +//go:build !go1.9 +// +build !go1.9 package concurrent diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go index 9756fcc75..4899eed02 100644 --- a/vendor/github.com/modern-go/concurrent/log.go +++ b/vendor/github.com/modern-go/concurrent/log.go @@ -1,13 +1,13 @@ package concurrent import ( - "os" - "log" "io/ioutil" + "log" + "os" ) // ErrorLogger is used to print out error, can be set to writer other than stderr var ErrorLogger = log.New(os.Stderr, "", 0) // InfoLogger is used to print informational message, default to off -var InfoLogger = log.New(ioutil.Discard, "", 0) \ No newline at end of file +var InfoLogger = log.New(ioutil.Discard, "", 0) diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go index 05a77dceb..5ea18eb7b 100644 --- a/vendor/github.com/modern-go/concurrent/unbounded_executor.go +++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go @@ -3,11 +3,11 @@ package concurrent import ( "context" "fmt" + "reflect" "runtime" "runtime/debug" "sync" "time" - "reflect" ) // HandlePanic logs goroutine panic by default diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go index 2b4116f6c..33c825f6d 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_118.go +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -1,4 +1,5 @@ -//+build go1.18 +//go:build go1.18 +// +build go1.18 package reflect2 @@ -8,6 +9,7 @@ import ( // m escapes into the return value, but the caller of mapiterinit // doesn't let the return value escape. 
+// //go:noescape //go:linkname mapiterinit reflect.mapiterinit func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) @@ -20,4 +22,4 @@ func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { pKeyRType: type2.pKeyRType, pElemRType: type2.pElemRType, } -} \ No newline at end of file +} diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index 974f7685e..03ccb43c6 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -1,4 +1,5 @@ -//+build go1.9 +//go:build go1.9 +// +build go1.9 package reflect2 diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go index 00003dbd7..092ec5b5d 100644 --- a/vendor/github.com/modern-go/reflect2/go_below_118.go +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -1,4 +1,5 @@ -//+build !go1.18 +//go:build !go1.18 +// +build !go1.18 package reflect2 @@ -8,6 +9,7 @@ import ( // m escapes into the return value, but the caller of mapiterinit // doesn't let the return value escape. +// //go:noescape //go:linkname mapiterinit reflect.mapiterinit func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) @@ -18,4 +20,4 @@ func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { pKeyRType: type2.pKeyRType, pElemRType: type2.pElemRType, } -} \ No newline at end of file +} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index c43c8b9d6..b0c281fca 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -282,6 +282,7 @@ func likePtrType(typ reflect.Type) bool { // output depends on the input. noescape is inlined and currently // compiles down to zero instructions. // USE CAREFULLY! +// //go:nosplit func NoEscape(p unsafe.Pointer) unsafe.Pointer { x := uintptr(p) diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 4b13c3155..54c8498ef 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,3 +1,4 @@ +//go:build !gccgo // +build !gccgo package reflect2 @@ -9,6 +10,7 @@ import ( ) // typelinks2 for 1.7 ~ +// //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index b49f614ef..61849bb42 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -13,6 +13,7 @@ func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer // typedslicecopy copies a slice of elemType values from src to dst, // returning the number of elements copied. 
+// //go:linkname typedslicecopy reflect.typedslicecopy //go:noescape func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go index 003e99fad..2a73737af 100644 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -199,12 +199,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool { // If IsJunk is not defined: // // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// // and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' +// +// k >= k' +// i <= i' +// and if i == i', j <= j' // // In other words, of all maximal matching blocks, return one that // starts earliest in a, and of all those maximal matching blocks that diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go index da67aba06..51392be8f 100644 --- a/vendor/github.com/sirupsen/logrus/doc.go +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -1,25 +1,25 @@ /* Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - The simplest way to use Logrus is simply the package-level exported logger: - package main + package main - import ( - log "github.com/sirupsen/logrus" - ) + import ( + log "github.com/sirupsen/logrus" + ) - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 For a full guide visit https://github.com/sirupsen/logrus */ diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index 408883773..8c7615515 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -30,12 +30,12 @@ type Formatter interface { // This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // -// logrus.WithField("level", 1).Info("hello") +// logrus.WithField("level", 1).Info("hello") // // Would just silently drop the user provided level. Instead with this code // it'll logged as: // -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. 
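Nearly all of the comment-only churn in the vendored hunks above and below is mechanical: Go 1.19's gofmt reformats doc comments, re-indenting code and tables inside comments with a single leading tab and separating them from surrounding prose with blank `//` lines, while the paired `//go:build` / `// +build` lines come from gofmt keeping both build-constraint syntaxes in sync for older toolchains. A minimal before/after sketch of the doc-comment rewrite, on a hypothetical function that appears in no file in this diff:

	// Before Go 1.19 gofmt the indented line rendered as plain prose:
	//
	// sum The total of all inputs
	//     Must be non-negative.
	//
	// After the rewrite, a blank // line plus a tab marks a code block:
	//
	// sum The total of all inputs
	//
	//	Must be non-negative.
	func clamp(sum int) int {
		if sum < 0 {
			return 0
		}
		return sum
	}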
diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go index 02b8df380..0f7d15715 100644 --- a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go +++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go @@ -1,3 +1,4 @@ +//go:build !windows && !nacl && !plan9 // +build !windows,!nacl,!plan9 package syslog diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 337704457..893c6c152 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -73,12 +73,12 @@ func (mw *MutexWrap) Disable() { // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } // // It's recommended to make this a global instance called `log`. func New() *Logger { @@ -341,9 +341,9 @@ func (logger *Logger) Exit(code int) { logger.ExitFunc(code) } -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. +// When file is opened with appending mode, it's safe to +// write concurrently to a file (within 4k message on Linux). +// In these cases user can choose to disable the lock. func (logger *Logger) SetNoLock() { logger.mu.Disable() } diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go index 2403de981..45de3e2b6 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 499789984..e3fa38b71 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,3 +1,4 @@ +//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && !js // +build darwin dragonfly freebsd netbsd openbsd // +build !js diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go index ebdae3ec6..9e951f1b4 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -1,3 +1,4 @@ +//go:build js // +build js package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go index 97af92c68..04232da19 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -1,3 +1,4 @@ +//go:build js || nacl || plan9 // +build js nacl plan9 package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index 3293fb3ca..1b4a99e32 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ 
b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,3 +1,4 @@ +//go:build !appengine && !js && !windows && !nacl && !plan9 // +build !appengine,!js,!windows,!nacl,!plan9 package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 04748b851..f3154b17f 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,3 +1,4 @@ +//go:build (linux || aix || zos) && !js // +build linux aix zos // +build !js diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go index 2879eb50e..893c62410 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -1,3 +1,4 @@ +//go:build !appengine && !js && windows // +build !appengine,!js,windows package logrus diff --git a/vendor/github.com/skycoin/noise/patterns.go b/vendor/github.com/skycoin/noise/patterns.go index 094cf38bf..7d29d5ba7 100644 --- a/vendor/github.com/skycoin/noise/patterns.go +++ b/vendor/github.com/skycoin/noise/patterns.go @@ -99,7 +99,7 @@ var HandshakeXX = HandshakePattern{ } var HandshakeXXfallback = HandshakePattern{ - Name: "XXfallback", + Name: "XXfallback", ResponderPreMessages: []MessagePattern{MessagePatternE}, Messages: [][]MessagePattern{ {MessagePatternE, MessagePatternDHEE, MessagePatternS, MessagePatternDHSE}, diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_old.go b/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_old.go index ffa4bca5c..cd051e0ff 100644 --- a/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_old.go +++ b/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_old.go @@ -11,7 +11,7 @@ import ( "math/big" ) -//alphabet used by Bitcoins +// alphabet used by Bitcoins var alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" var ( @@ -26,7 +26,7 @@ var ( // oldBase58 type to hold the oldBase58 string type oldBase58 string -//reverse alphabet used for quckly converting base58 strings into numbers +// reverse alphabet used for quckly converting base58 strings into numbers var revalp = map[string]int{ "1": 0, "2": 1, "3": 2, "4": 3, "5": 4, "6": 5, "7": 6, "8": 7, "9": 8, "A": 9, "B": 10, "C": 11, "D": 12, "E": 13, "F": 14, "G": 15, "H": 16, "J": 17, "K": 18, "L": 19, @@ -76,7 +76,7 @@ func (b oldBase58) ToInt() (int, error) { return answer, nil } -//ToHex converts base58 to hex bytes +// ToHex converts base58 to hex bytes func (b oldBase58) ToHex() ([]byte, error) { value, err := b.ToBig() //convert to big.Int if err != nil { diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/crypto.go b/vendor/github.com/skycoin/skycoin/src/cipher/crypto.go index 53bce1fe7..60c24fa9f 100644 --- a/vendor/github.com/skycoin/skycoin/src/cipher/crypto.go +++ b/vendor/github.com/skycoin/skycoin/src/cipher/crypto.go @@ -270,7 +270,7 @@ func (sk SecKey) Null() bool { return sk == SecKey{} } -//ECDH generates a shared secret +// ECDH generates a shared secret // A: pub1,sec1 // B: pub2,sec2 // person A sends their public key pub1 diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/secp256k1.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/secp256k1.go index 1cbb4ae33..db8f044ee 100644 --- 
a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/secp256k1.go +++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/secp256k1.go @@ -22,8 +22,8 @@ func initConstants() { 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x41}) TheCurve.halfOrder.SetBytes([]byte{ - 0X7F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, - 0X5D, 0X57, 0X6E, 0X73, 0X57, 0XA4, 0X50, 0X1D, 0XDF, 0XE9, 0X2F, 0X46, 0X68, 0X1B, 0X20, 0XA0}) + 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x5D, 0x57, 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0}) TheCurve.p.SetBytes([]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xy.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xy.go index a6f9d6c89..afdd64efa 100644 --- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xy.go +++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xy.go @@ -86,7 +86,7 @@ func (xy XY) Bytes() []byte { } // BytesUncompressed returns serialized key in uncompressed format "<04> <X> <Y>" -//65 bytes +// 65 bytes func (xy *XY) BytesUncompressed() (raw []byte) { xy.X.Normalize() // See https://github.com/piotrnar/gocoin/issues/15 xy.Y.Normalize() // See https://github.com/piotrnar/gocoin/issues/15 diff --git a/vendor/github.com/skycoin/skywire-utilities/pkg/httputil/health.go b/vendor/github.com/skycoin/skywire-utilities/pkg/httputil/health.go index 6963bf2da..015071d7e 100644 --- a/vendor/github.com/skycoin/skywire-utilities/pkg/httputil/health.go +++ b/vendor/github.com/skycoin/skywire-utilities/pkg/httputil/health.go @@ -13,13 +13,14 @@ var path = "/health" // HealthCheckResponse is struct of /health endpoint type HealthCheckResponse struct { - BuildInfo *buildinfo.Info `json:"build_info,omitempty"` - StartedAt time.Time `json:"started_at"` - DmsgAddr string `json:"dmsg_address,omitempty"` + BuildInfo *buildinfo.Info `json:"build_info,omitempty"` + StartedAt time.Time `json:"started_at"` + DmsgAddr string `json:"dmsg_address,omitempty"` + DmsgServers []string `json:"dmsg_servers,omitempty"` } // GetServiceHealth gets the response from the given service url -func GetServiceHealth(ctx context.Context, url string) (health *HealthCheckResponse, err error) { +func GetServiceHealth(_ context.Context, url string) (health *HealthCheckResponse, err error) { resp, err := http.Get(url + path) if err != nil { return nil, err diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 24a5036e9..e368aaf1c 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. 
+ var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -927,9 +940,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 3cb2e69db..d421887e8 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -98,9 +98,12 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" --ss="v3" +// +// --ss="v1,v2" --ss="v3" +// // will result in -// []string{"v1", "v2", "v3"} +// +// []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { f.VarP(newStringSliceValue(value, p), name, "", usage) } @@ -114,9 +117,12 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
// For example: -// --ss="v1,v2" --ss="v3" +// +// --ss="v1,v2" --ss="v3" +// // will result in -// []string{"v1", "v2", "v3"} +// +// []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) } @@ -130,9 +136,12 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" --ss="v3" +// +// --ss="v1,v2" --ss="v3" +// // will result in -// []string{"v1", "v2", "v3"} +// +// []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { p := []string{} f.StringSliceVarP(&p, name, "", value, usage) @@ -150,9 +159,12 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" --ss="v3" +// +// --ss="v1,v2" --ss="v3" +// // will result in -// []string{"v1", "v2", "v3"} +// +// []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { return CommandLine.StringSliceP(name, "", value, usage) } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f852147..8b365bef0 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -6,10 +6,11 @@ package require import ( - assert "github.com/stretchr/testify/assert" http "net/http" url "net/url" time "time" + + assert "github.com/stretchr/testify/assert" ) // Condition uses a Comparison to assert a complex condition. diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330..8009f7500 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -6,10 +6,11 @@ package require import ( - assert "github.com/stretchr/testify/assert" http "net/http" url "net/url" time "time" + + assert "github.com/stretchr/testify/assert" ) // Condition uses a Comparison to assert a complex condition. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go index b8ddbc99d..7adbe23b8 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go @@ -6,6 +6,9 @@ package arch import ( + "fmt" + "strings" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/obj/arm" "github.com/twitchyliquid64/golang-asm/obj/arm64" @@ -15,8 +18,6 @@ import ( "github.com/twitchyliquid64/golang-asm/obj/s390x" "github.com/twitchyliquid64/golang-asm/obj/wasm" "github.com/twitchyliquid64/golang-asm/obj/x86" - "fmt" - "strings" ) // Pseudo-registers whose names are the constant name without the leading R. 
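The string_slice.go hunks above only reflow the examples, but the behaviour they document is worth pinning down: repeated --ss flags accumulate, and each occurrence is additionally split on commas. A small self-contained sketch of that usage (the flag name and program are illustrative, not taken from this diff):

	package main

	import (
		"fmt"

		"github.com/spf13/pflag"
	)

	func main() {
		// StringSlice registers a repeatable, comma-splitting flag.
		ss := pflag.StringSlice("ss", nil, "comma-separated values")
		pflag.Parse()
		// Invoked as: prog --ss="v1,v2" --ss="v3"
		fmt.Println(*ss) // prints [v1 v2 v3]
	}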
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go index b0606beb3..6c334e18f 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go @@ -9,9 +9,10 @@ package arch import ( + "errors" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/obj/arm64" - "errors" ) var arm64LS = map[string]uint8{ diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go index 4b43d74f2..89ae39f73 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build darwin dragonfly freebsd linux netbsd openbsd package bio @@ -17,12 +18,12 @@ import ( // because some operating systems place a limit on the number of // distinct mapped regions per process. As of this writing: // -// Darwin unlimited -// DragonFly 1000000 (vm.max_proc_mmap) -// FreeBSD unlimited -// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count? -// NetBSD unlimited -// OpenBSD unlimited +// Darwin unlimited +// DragonFly 1000000 (vm.max_proc_mmap) +// FreeBSD unlimited +// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count? +// NetBSD unlimited +// OpenBSD unlimited var mmapLimit int32 = 1<<31 - 1 func init() { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go index f43c67ac2..533a93180 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package bio diff --git a/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go index 2fee79d38..cf7bcb97f 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go @@ -9,13 +9,14 @@ package dwarf import ( "bytes" - "github.com/twitchyliquid64/golang-asm/objabi" "errors" "fmt" "os/exec" "sort" "strconv" "strings" + + "github.com/twitchyliquid64/golang-asm/objabi" ) // InfoPrefix is the prefix for all the symbols containing DWARF info entries. 
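The import-block hunks in the golang-asm files here and below change no behaviour at all; they move standard-library imports into their own group ahead of module imports, separated by a blank line, which is the layout goimports and gofmt-based linters expect. The convention, sketched on a hypothetical file:

	package main

	import (
		"fmt"
		"sort"

		"github.com/twitchyliquid64/golang-asm/obj"
	)

	func main() {
		// The blank line above separates the stdlib group from the
		// third-party group; each group is alphabetized independently.
		regs := []string{"R3", "R1", "R2"}
		sort.Strings(regs)
		var p obj.Prog
		_ = p
		fmt.Println(regs)
	}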
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go index 9e192330d..e9812671c 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go @@ -6,8 +6,9 @@ package goobj import ( "bytes" - "github.com/twitchyliquid64/golang-asm/objabi" "encoding/binary" + + "github.com/twitchyliquid64/golang-asm/objabi" ) // CUFileIndex is used to index the filenames that are stored in the diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go index 3303549aa..a9bd52733 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go @@ -20,14 +20,15 @@ package goobj import ( "bytes" - "github.com/twitchyliquid64/golang-asm/bio" "crypto/sha1" "encoding/binary" "errors" "fmt" - "github.com/twitchyliquid64/golang-asm/unsafeheader" "io" "unsafe" + + "github.com/twitchyliquid64/golang-asm/bio" + "github.com/twitchyliquid64/golang-asm/unsafeheader" ) // New object file format. @@ -268,15 +269,16 @@ func (p *ImportedPkg) Write(w *Writer) { // Symbol definition. // // Serialized format: -// Sym struct { -// Name string -// ABI uint16 -// Type uint8 -// Flag uint8 -// Flag2 uint8 -// Siz uint32 -// Align uint32 -// } +// +// Sym struct { +// Name string +// ABI uint16 +// Type uint8 +// Flag uint8 +// Flag2 uint8 +// Siz uint32 +// Align uint32 +// } type Sym [SymSize]byte const SymSize = stringRefSize + 2 + 1 + 1 + 1 + 4 + 4 @@ -372,13 +374,14 @@ const HashSize = sha1.Size // Relocation. // // Serialized format: -// Reloc struct { -// Off int32 -// Siz uint8 -// Type uint8 -// Add int64 -// Sym SymRef -// } +// +// Reloc struct { +// Off int32 +// Siz uint8 +// Type uint8 +// Add int64 +// Sym SymRef +// } type Reloc [RelocSize]byte const RelocSize = 4 + 1 + 1 + 8 + 8 @@ -416,10 +419,11 @@ func (r *Reloc) fromBytes(b []byte) { copy(r[:], b) } // Aux symbol info. // // Serialized format: -// Aux struct { -// Type uint8 -// Sym SymRef -// } +// +// Aux struct { +// Type uint8 +// Sym SymRef +// } type Aux [AuxSize]byte const AuxSize = 1 + 8 @@ -456,11 +460,12 @@ func (a *Aux) fromBytes(b []byte) { copy(a[:], b) } // Referenced symbol flags. // // Serialized format: -// RefFlags struct { -// Sym symRef -// Flag uint8 -// Flag2 uint8 -// } +// +// RefFlags struct { +// Sym symRef +// Flag uint8 +// Flag2 uint8 +// } type RefFlags [RefFlagsSize]byte const RefFlagsSize = 8 + 1 + 1 @@ -483,10 +488,11 @@ func (r *RefFlags) Write(w *Writer) { w.Bytes(r[:]) } // Referenced symbol name. 
// // Serialized format: -// RefName struct { -// Sym symRef -// Name string -// } +// +// RefName struct { +// Sym symRef +// Name string +// } type RefName [RefNameSize]byte const RefNameSize = 8 + stringRefSize diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go index 924657fb3..49f15fab9 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go @@ -31,12 +31,13 @@ package arm import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" ) // ctxt5 holds state while assembling a single function. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go index 30e03001a..7ee07ac90 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go @@ -31,8 +31,9 @@ package arm import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) func init() { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go index 2bbb64b9d..0afe223eb 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go @@ -31,12 +31,13 @@ package arm64 import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" ) // ctxt7 holds state while assembling a single function. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go index 751521754..4823d9b44 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go @@ -6,24 +6,24 @@ Package arm64 implements an ARM64 assembler. Go assembly syntax is different from GNU ARM64 syntax, but we can still follow the general rules to map between them. -Instructions mnemonics mapping rules +# Instructions mnemonics mapping rules 1. Most instructions use width suffixes of instruction names to indicate operand width rather than using different register names. - Examples: - ADC R24, R14, R12 <=> adc x12, x24 - ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24 - FCMPS F2, F3 <=> fcmp s3, s2 - FCMPD F2, F3 <=> fcmp d3, d2 - FCVTDH F2, F3 <=> fcvt h3, d2 + Examples: + ADC R24, R14, R12 <=> adc x12, x24 + ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24 + FCMPS F2, F3 <=> fcmp s3, s2 + FCMPD F2, F3 <=> fcmp d3, d2 + FCVTDH F2, F3 <=> fcvt h3, d2 2. Go uses .P and .W suffixes to indicate post-increment and pre-increment. - Examples: - MOVD.P -8(R10), R8 <=> ldr x8, [x10],#-8 - MOVB.W 16(R16), R10 <=> ldrsb x10, [x16,#16]! - MOVBU.W 16(R16), R10 <=> ldrb x10, [x16,#16]! + Examples: + MOVD.P -8(R10), R8 <=> ldr x8, [x10],#-8 + MOVB.W 16(R16), R10 <=> ldrsb x10, [x16,#16]! + MOVBU.W 16(R16), R10 <=> ldrb x10, [x16,#16]! 3. Go uses a series of MOV instructions as load and store. @@ -39,12 +39,12 @@ ldrsh, sturh, strh => MOVH. 5. 
Go adds a V prefix for most floating-point and SIMD instructions, except cryptographic extension instructions and floating-point(scalar) instructions. - Examples: - VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h - VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11 - VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s - AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b - SCVTFWS R3, F16 <=> scvtf s17, w6 + Examples: + VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h + VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11 + VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s + AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b + SCVTFWS R3, F16 <=> scvtf s17, w6 6. Align directive @@ -52,25 +52,26 @@ Go asm supports the PCALIGN directive, which indicates that the next instruction to a specified boundary by padding with NOOP instruction. The alignment value supported on arm64 must be a power of 2 and in the range of [8, 2048]. - Examples: - PCALIGN $16 - MOVD $2, R0 // This instruction is aligned with 16 bytes. - PCALIGN $1024 - MOVD $3, R1 // This instruction is aligned with 1024 bytes. + Examples: + PCALIGN $16 + MOVD $2, R0 // This instruction is aligned with 16 bytes. + PCALIGN $1024 + MOVD $3, R1 // This instruction is aligned with 1024 bytes. PCALIGN also changes the function alignment. If a function has one or more PCALIGN directives, its address will be aligned to the same or coarser boundary, which is the maximum of all the alignment values. In the following example, the function Add is aligned with 128 bytes. - Examples: - TEXT ·Add(SB),$40-16 - MOVD $2, R0 - PCALIGN $32 - MOVD $4, R1 - PCALIGN $128 - MOVD $8, R2 - RET + + Examples: + TEXT ·Add(SB),$40-16 + MOVD $2, R0 + PCALIGN $32 + MOVD $4, R1 + PCALIGN $128 + MOVD $8, R2 + RET On arm64, functions in Go are aligned to 16 bytes by default, we can also use PCALGIN to set the function alignment. The functions that need to be aligned are preferably using NOFRAME and NOSPLIT @@ -79,12 +80,12 @@ have the same alignment as the first hand-written instruction. In the following example, PCALIGN at the entry of the function Add will align its address to 2048 bytes. - Examples: - TEXT ·Add(SB),NOSPLIT|NOFRAME,$0 - PCALIGN $2048 - MOVD $1, R0 - MOVD $1, R1 - RET + Examples: + TEXT ·Add(SB),NOSPLIT|NOFRAME,$0 + PCALIGN $2048 + MOVD $1, R0 + MOVD $1, R1 + RET Special Cases. @@ -98,16 +99,15 @@ Special Cases. related to real ARM64 instruction. NOOP serves for the hardware nop instruction. NOOP is an alias of HINT $0. - Examples: - VMOV V13.B[1], R20 <=> mov x20, v13.b[1] - VMOV V13.H[1], R20 <=> mov w20, v13.h[1] - JMP (R3) <=> br x3 - CALL (R17) <=> blr x17 - LDAXRB (R19), R16 <=> ldaxrb w16, [x19] - NOOP <=> nop - + Examples: + VMOV V13.B[1], R20 <=> mov x20, v13.b[1] + VMOV V13.H[1], R20 <=> mov w20, v13.h[1] + JMP (R3) <=> br x3 + CALL (R17) <=> blr x17 + LDAXRB (R19), R16 <=> ldaxrb w16, [x19] + NOOP <=> nop -Register mapping rules +# Register mapping rules 1. All basic register names are written as Rn. @@ -116,87 +116,85 @@ Register mapping rules 3. Bn, Hn, Dn, Sn and Qn instructions are written as Fn in floating-point instructions and as Vn in SIMD instructions. - -Argument mapping rules +# Argument mapping rules 1. The operands appear in left-to-right assignment order. Go reverses the arguments of most instructions. 
- Examples: - ADD R11.SXTB<<1, RSP, R25 <=> add x25, sp, w11, sxtb #1 - VADD V16, V19, V14 <=> add d14, d19, d16 + Examples: + ADD R11.SXTB<<1, RSP, R25 <=> add x25, sp, w11, sxtb #1 + VADD V16, V19, V14 <=> add d14, d19, d16 Special Cases. (1) Argument order is the same as in the GNU ARM64 syntax: cbz, cbnz and some store instructions, such as str, stur, strb, sturb, strh, sturh stlr, stlrb. stlrh, st1. - Examples: - MOVD R29, 384(R19) <=> str x29, [x19,#384] - MOVB.P R30, 30(R4) <=> strb w30, [x4],#30 - STLRH R21, (R19) <=> stlrh w21, [x19] + Examples: + MOVD R29, 384(R19) <=> str x29, [x19,#384] + MOVB.P R30, 30(R4) <=> strb w30, [x4],#30 + STLRH R21, (R19) <=> stlrh w21, [x19] (2) MADD, MADDW, MSUB, MSUBW, SMADDL, SMSUBL, UMADDL, UMSUBL <Rm>, <Ra>, <Rn>, <Rd> - Examples: - MADD R2, R30, R22, R6 <=> madd x6, x22, x2, x30 - SMSUBL R10, R3, R17, R27 <=> smsubl x27, w17, w10, x3 + Examples: + MADD R2, R30, R22, R6 <=> madd x6, x22, x2, x30 + SMSUBL R10, R3, R17, R27 <=> smsubl x27, w17, w10, x3 (3) FMADDD, FMADDS, FMSUBD, FMSUBS, FNMADDD, FNMADDS, FNMSUBD, FNMSUBS <Fm>, <Fa>, <Fn>, <Fd> - Examples: - FMADDD F30, F20, F3, F29 <=> fmadd d29, d3, d30, d20 - FNMSUBS F7, F25, F7, F22 <=> fnmsub s22, s7, s7, s25 + Examples: + FMADDD F30, F20, F3, F29 <=> fmadd d29, d3, d30, d20 + FNMSUBS F7, F25, F7, F22 <=> fnmsub s22, s7, s7, s25 (4) BFI, BFXIL, SBFIZ, SBFX, UBFIZ, UBFX $<lsb>, <Rn>, $<width>, <Rd> - Examples: - BFIW $16, R20, $6, R0 <=> bfi w0, w20, #16, #6 - UBFIZ $34, R26, $5, R20 <=> ubfiz x20, x26, #34, #5 + Examples: + BFIW $16, R20, $6, R0 <=> bfi w0, w20, #16, #6 + UBFIZ $34, R26, $5, R20 <=> ubfiz x20, x26, #34, #5 (5) FCCMPD, FCCMPS, FCCMPED, FCCMPES <cond>, Fm. Fn, $<nzcv> - Examples: - FCCMPD AL, F8, F26, $0 <=> fccmp d26, d8, #0x0, al - FCCMPS VS, F29, F4, $4 <=> fccmp s4, s29, #0x4, vs - FCCMPED LE, F20, F5, $13 <=> fccmpe d5, d20, #0xd, le - FCCMPES NE, F26, F10, $0 <=> fccmpe s10, s26, #0x0, ne + Examples: + FCCMPD AL, F8, F26, $0 <=> fccmp d26, d8, #0x0, al + FCCMPS VS, F29, F4, $4 <=> fccmp s4, s29, #0x4, vs + FCCMPED LE, F20, F5, $13 <=> fccmpe d5, d20, #0xd, le + FCCMPES NE, F26, F10, $0 <=> fccmpe s10, s26, #0x0, ne (6) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, $<imm>, $<nzcv> - Examples: - CCMP MI, R22, $12, $13 <=> ccmp x22, #0xc, #0xd, mi - CCMNW AL, R1, $11, $8 <=> ccmn w1, #0xb, #0x8, al + Examples: + CCMP MI, R22, $12, $13 <=> ccmp x22, #0xc, #0xd, mi + CCMNW AL, R1, $11, $8 <=> ccmn w1, #0xb, #0x8, al (7) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, <Rm>, $<nzcv> - Examples: - CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs - CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs + Examples: + CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs + CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs (9) CSEL, CSELW, CSNEG, CSNEGW, CSINC, CSINCW <cond>, <Rn>, <Rm>, <Rd> ; FCSELD, FCSELS <cond>, <Fn>, <Fm>, <Fd> - Examples: - CSEL GT, R0, R19, R1 <=> csel x1, x0, x19, gt - CSNEGW GT, R7, R17, R8 <=> csneg w8, w7, w17, gt - FCSELD EQ, F15, F18, F16 <=> fcsel d16, d15, d18, eq + Examples: + CSEL GT, R0, R19, R1 <=> csel x1, x0, x19, gt + CSNEGW GT, R7, R17, R8 <=> csneg w8, w7, w17, gt + FCSELD EQ, F15, F18, F16 <=> fcsel d16, d15, d18, eq (10) TBNZ, TBZ $<imm>, <Rt>, <label> - (11) STLXR, STLXRW, STXR, STXRW, STLXRB, STLXRH, STXRB, STXRH <Rf>, (<Rn|RSP>), <Rs> - Examples: - STLXR ZR, (R15), R16 <=> stlxr w16, xzr, [x15] - STXRB R9, (R21), R19 <=> stxrb w19, w9, [x21] + Examples: + STLXR ZR, (R15), R16 <=> stlxr w16, xzr, [x15] + STXRB R9, (R21), R19 <=> stxrb w19, w9, [x21] (12) 
STLXP, STLXPW, STXP, STXPW (<Rf1>, <Rf2>), (<Rn|RSP>), <Rs> - Examples: - STLXP (R17, R19), (R4), R5 <=> stlxp w5, x17, x19, [x4] - STXPW (R30, R25), (R22), R13 <=> stxp w13, w30, w25, [x22] + Examples: + STLXP (R17, R19), (R4), R5 <=> stlxp w5, x17, x19, [x4] + STXPW (R30, R25), (R22), R13 <=> stxp w13, w30, w25, [x22] 2. Expressions for special arguments. @@ -204,46 +202,46 @@ FCSELD, FCSELS <cond>, <Fn>, <Fm>, <Fd> Optionally-shifted immediate. - Examples: - ADD $(3151<<12), R14, R20 <=> add x20, x14, #0xc4f, lsl #12 - ADDW $1864, R25, R6 <=> add w6, w25, #0x748 + Examples: + ADD $(3151<<12), R14, R20 <=> add x20, x14, #0xc4f, lsl #12 + ADDW $1864, R25, R6 <=> add w6, w25, #0x748 Optionally-shifted registers are written as <Rm>{<shift><amount>}. The <shift> can be <<(lsl), >>(lsr), ->(asr), @>(ror). - Examples: - ADD R19>>30, R10, R24 <=> add x24, x10, x19, lsr #30 - ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24 + Examples: + ADD R19>>30, R10, R24 <=> add x24, x10, x19, lsr #30 + ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24 Extended registers are written as <Rm>{.<extend>{<<<amount>}}. <extend> can be UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW or SXTX. - Examples: - ADDS R19.UXTB<<4, R9, R26 <=> adds x26, x9, w19, uxtb #4 - ADDSW R14.SXTX, R14, R6 <=> adds w6, w14, w14, sxtx + Examples: + ADDS R19.UXTB<<4, R9, R26 <=> adds x26, x9, w19, uxtb #4 + ADDSW R14.SXTX, R14, R6 <=> adds w6, w14, w14, sxtx Memory references: [<Xn|SP>{,#0}] is written as (Rn|RSP), a base register and an immediate offset is written as imm(Rn|RSP), a base register and an offset register is written as (Rn|RSP)(Rm). - Examples: - LDAR (R22), R9 <=> ldar x9, [x22] - LDP 28(R17), (R15, R23) <=> ldp x15, x23, [x17,#28] - MOVWU (R4)(R12<<2), R8 <=> ldr w8, [x4, x12, lsl #2] - MOVD (R7)(R11.UXTW<<3), R25 <=> ldr x25, [x7,w11,uxtw #3] - MOVBU (R27)(R23), R14 <=> ldrb w14, [x27,x23] + Examples: + LDAR (R22), R9 <=> ldar x9, [x22] + LDP 28(R17), (R15, R23) <=> ldp x15, x23, [x17,#28] + MOVWU (R4)(R12<<2), R8 <=> ldr w8, [x4, x12, lsl #2] + MOVD (R7)(R11.UXTW<<3), R25 <=> ldr x25, [x7,w11,uxtw #3] + MOVBU (R27)(R23), R14 <=> ldrb w14, [x27,x23] Register pairs are written as (Rt1, Rt2). - Examples: - LDP.P -240(R11), (R12, R26) <=> ldp x12, x26, [x11],#-240 + Examples: + LDP.P -240(R11), (R12, R26) <=> ldp x12, x26, [x11],#-240 Register with arrangement and register with arrangement and index. 
- Examples: - VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h - VLD1 (R2), [V21.B16] <=> ld1 {v21.16b}, [x2] - VST1.P V9.S[1], (R16)(R21) <=> st1 {v9.s}[1], [x16], x28 - VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14) <=> st1 {v13.8h-v15.8h}, [x3], x14 - VST1.P [V14.D1, V15.D1], (R7)(R23) <=> st1 {v14.1d, v15.1d}, [x7], x23 + Examples: + VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h + VLD1 (R2), [V21.B16] <=> ld1 {v21.16b}, [x2] + VST1.P V9.S[1], (R16)(R21) <=> st1 {v9.s}[1], [x16], x28 + VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14) <=> st1 {v13.8h-v15.8h}, [x3], x14 + VST1.P [V14.D1, V15.D1], (R7)(R23) <=> st1 {v14.1d, v15.1d}, [x7], x23 */ package arm64 diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/list7.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/list7.go index 1906c5fb9..0531fd370 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/list7.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/list7.go @@ -31,8 +31,9 @@ package arm64 import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) var strcond = [16]string{ diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/obj7.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/obj7.go index d2d935bd4..c2ffd9868 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/obj7.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/obj7.go @@ -31,11 +31,12 @@ package arm64 import ( + "math" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/src" "github.com/twitchyliquid64/golang-asm/sys" - "math" ) var complements = []obj.As{ diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/data.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/data.go index a63213b19..ad31c7be3 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/data.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/data.go @@ -32,9 +32,10 @@ package obj import ( - "github.com/twitchyliquid64/golang-asm/objabi" "log" "math" + + "github.com/twitchyliquid64/golang-asm/objabi" ) // Grow increases the length of s.P to lsiz. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go index 1e0a9a73f..c764565c5 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go @@ -7,12 +7,13 @@ package obj import ( - "github.com/twitchyliquid64/golang-asm/dwarf" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/src" "fmt" "sort" "sync" + + "github.com/twitchyliquid64/golang-asm/dwarf" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/src" ) // Generate a sequence of opcodes that is as short as possible. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go index 511aa65a2..fa05bc5a3 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go @@ -13,15 +13,16 @@ import "github.com/twitchyliquid64/golang-asm/src" // every time a function is inlined. 
For example, suppose f() calls g() // and g has two calls to h(), and that f, g, and h are inlineable: // -// 1 func main() { -// 2 f() -// 3 } -// 4 func f() { -// 5 g() -// 6 } -// 7 func g() { -// 8 h() -// 9 h() +// 1 func main() { +// 2 f() +// 3 } +// 4 func f() { +// 5 g() +// 6 } +// 7 func g() { +// 8 h() +// 9 h() +// // 10 } // 11 func h() { // 12 println("H") @@ -30,12 +31,12 @@ import "github.com/twitchyliquid64/golang-asm/src" // Assuming the global tree starts empty, inlining will produce the // following tree: // -// []InlinedCall{ -// {Parent: -1, Func: "f", Pos: <line 2>}, -// {Parent: 0, Func: "g", Pos: <line 5>}, -// {Parent: 1, Func: "h", Pos: <line 8>}, -// {Parent: 1, Func: "h", Pos: <line 9>}, -// } +// []InlinedCall{ +// {Parent: -1, Func: "f", Pos: <line 2>}, +// {Parent: 0, Func: "g", Pos: <line 5>}, +// {Parent: 1, Func: "h", Pos: <line 8>}, +// {Parent: 1, Func: "h", Pos: <line 9>}, +// } // // The nodes of h inlined into main will have inlining indexes 2 and 3. // diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/link.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/link.go index a7f854669..c6611e368 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/link.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/link.go @@ -32,13 +32,14 @@ package obj import ( "bufio" + "fmt" + "sync" + "github.com/twitchyliquid64/golang-asm/dwarf" "github.com/twitchyliquid64/golang-asm/goobj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/src" "github.com/twitchyliquid64/golang-asm/sys" - "fmt" - "sync" ) // An Addr is an argument to an instruction. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go index 61bc24699..052bab0e0 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go @@ -30,12 +30,13 @@ package mips import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/sys" "fmt" "log" "sort" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/sys" ) // ctxt0 holds state while assembling a single function. 
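The inl.go comment above describes the global inlining tree as a flat slice in which every entry points at its parent, so the full inline stack at any leaf is recovered by walking Parent links back to a root. A simplified sketch of that walk (the fields here are trimmed down; the real obj.InlinedCall also carries a source position and an *LSym):

	package main

	import "fmt"

	type InlinedCall struct {
		Parent int    // index of the parent call in the tree, or -1 at a root
		Func   string // name of the function that was inlined
	}

	// inlineStack walks Parent links from a leaf back to the root.
	func inlineStack(tree []InlinedCall, idx int) []string {
		var stack []string
		for i := idx; i >= 0; i = tree[i].Parent {
			stack = append(stack, tree[i].Func)
		}
		return stack
	}

	func main() {
		tree := []InlinedCall{
			{Parent: -1, Func: "f"},
			{Parent: 0, Func: "g"},
			{Parent: 1, Func: "h"},
			{Parent: 1, Func: "h"},
		}
		fmt.Println(inlineStack(tree, 2)) // [h g f], leaf to root
	}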
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go index 71232aab1..cdf3eb9c8 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go @@ -30,8 +30,9 @@ package mips import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) func init() { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/obj0.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/obj0.go index 839db889f..38171b9cc 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/obj0.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/mips/obj0.go @@ -30,12 +30,13 @@ package mips import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/sys" "encoding/binary" "fmt" "math" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/sys" ) func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go index dd2ba4b0e..1b5e1e2c7 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go @@ -8,10 +8,6 @@ package obj import ( "bytes" - "github.com/twitchyliquid64/golang-asm/bio" - "github.com/twitchyliquid64/golang-asm/goobj" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/sys" "crypto/sha1" "encoding/binary" "fmt" @@ -19,6 +15,11 @@ import ( "path/filepath" "sort" "strings" + + "github.com/twitchyliquid64/golang-asm/bio" + "github.com/twitchyliquid64/golang-asm/goobj" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/sys" ) // Entry point of writing new object file. @@ -359,14 +360,14 @@ func contentHash64(s *LSym) goobj.Hash64Type { // Depending on the category of the referenced symbol, we choose // different hash algorithms such that the hash is globally // consistent. -// - For referenced content-addressable symbol, its content hash -// is globally consistent. -// - For package symbol and builtin symbol, its local index is -// globally consistent. -// - For non-package symbol, its fully-expanded name is globally -// consistent. For now, we require we know the current package -// path so we can always expand symbol names. (Otherwise, -// symbols with relocations are not considered hashable.) +// - For referenced content-addressable symbol, its content hash +// is globally consistent. +// - For package symbol and builtin symbol, its local index is +// globally consistent. +// - For non-package symbol, its fully-expanded name is globally +// consistent. For now, we require we know the current package +// path so we can always expand symbol names. (Otherwise, +// symbols with relocations are not considered hashable.) // // For now, we assume there is no circular dependencies among // hashed symbols. 
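The objfile.go hunk just above only re-indents the bullet list, but the rule it states, namely picking a hash source per referenced-symbol category so the result is globally consistent, is easy to misread in diff form. An illustrative dispatch under invented types (hypothetical throughout; the real logic lives in contentHash and operates on goobj types):

	package main

	import "fmt"

	type symKind int

	const (
		contentAddressable symKind = iota
		packageOrBuiltin
		nonPackage
	)

	type symbol struct {
		kind        symKind
		contentHash string // already consistent for content-addressable syms
		localIndex  int    // consistent for package and builtin syms
		fullName    string // expanded name, consistent for non-package syms
	}

	func refHash(s symbol) string {
		switch s.kind {
		case contentAddressable:
			return s.contentHash
		case packageOrBuiltin:
			return fmt.Sprintf("idx:%d", s.localIndex)
		default:
			// Requires knowing the current package path so the
			// name can always be fully expanded.
			return "name:" + s.fullName
		}
	}

	func main() {
		fmt.Println(refHash(symbol{kind: nonPackage, fullName: "pkgpath.Sym"}))
	}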
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go index d9a33577e..aa8a4c3dd 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go @@ -5,9 +5,10 @@ package obj import ( - "github.com/twitchyliquid64/golang-asm/goobj" "encoding/binary" "log" + + "github.com/twitchyliquid64/golang-asm/goobj" ) // funcpctab writes to dst a pc-value table mapping the code in func to the values diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go index 85074a202..02c280960 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go @@ -5,9 +5,10 @@ package obj import ( - "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "strings" + + "github.com/twitchyliquid64/golang-asm/objabi" ) type Plist struct { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/asm9.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/asm9.go index f56d87b12..f5d3782f3 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/asm9.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/asm9.go @@ -30,13 +30,14 @@ package ppc64 import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" "encoding/binary" "fmt" "log" "math" "sort" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" ) // ctxt9 holds state while assembling a single function. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/doc.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/doc.go index 6e601df82..c309bef80 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/doc.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/doc.go @@ -23,222 +23,221 @@ In the examples below, the Go assembly is on the left, PPC64 assembly on the rig 1. Operand ordering - In Go asm, the last operand (right) is the target operand, but with PPC64 asm, - the first operand (left) is the target. The order of the remaining operands is - not consistent: in general opcodes with 3 operands that perform math or logical - operations have their operands in reverse order. Opcodes for vector instructions - and those with more than 3 operands usually have operands in the same order except - for the target operand, which is first in PPC64 asm and last in Go asm. + In Go asm, the last operand (right) is the target operand, but with PPC64 asm, + the first operand (left) is the target. The order of the remaining operands is + not consistent: in general opcodes with 3 operands that perform math or logical + operations have their operands in reverse order. Opcodes for vector instructions + and those with more than 3 operands usually have operands in the same order except + for the target operand, which is first in PPC64 asm and last in Go asm. - Example: - ADD R3, R4, R5 <=> add r5, r4, r3 + Example: + ADD R3, R4, R5 <=> add r5, r4, r3 2. Constant operands - In Go asm, an operand that starts with '$' indicates a constant value. If the - instruction using the constant has an immediate version of the opcode, then an - immediate value is used with the opcode if possible. + In Go asm, an operand that starts with '$' indicates a constant value. 
If the + instruction using the constant has an immediate version of the opcode, then an + immediate value is used with the opcode if possible. - Example: - ADD $1, R3, R4 <=> addi r4, r3, 1 + Example: + ADD $1, R3, R4 <=> addi r4, r3, 1 3. Opcodes setting condition codes - In PPC64 asm, some instructions other than compares have variations that can set - the condition code where meaningful. This is indicated by adding '.' to the end - of the PPC64 instruction. In Go asm, these instructions have 'CC' at the end of - the opcode. The possible settings of the condition code depend on the instruction. - CR0 is the default for fixed-point instructions; CR1 for floating point; CR6 for - vector instructions. + In PPC64 asm, some instructions other than compares have variations that can set + the condition code where meaningful. This is indicated by adding '.' to the end + of the PPC64 instruction. In Go asm, these instructions have 'CC' at the end of + the opcode. The possible settings of the condition code depend on the instruction. + CR0 is the default for fixed-point instructions; CR1 for floating point; CR6 for + vector instructions. - Example: - ANDCC R3, R4, R5 <=> and. r5, r3, r4 (set CR0) + Example: + ANDCC R3, R4, R5 <=> and. r5, r3, r4 (set CR0) 4. Loads and stores from memory - In Go asm, opcodes starting with 'MOV' indicate a load or store. When the target - is a memory reference, then it is a store; when the target is a register and the - source is a memory reference, then it is a load. + In Go asm, opcodes starting with 'MOV' indicate a load or store. When the target + is a memory reference, then it is a store; when the target is a register and the + source is a memory reference, then it is a load. - MOV{B,H,W,D} variations identify the size as byte, halfword, word, doubleword. + MOV{B,H,W,D} variations identify the size as byte, halfword, word, doubleword. - Adding 'Z' to the opcode for a load indicates zero extend; if omitted it is sign extend. - Adding 'U' to a load or store indicates an update of the base register with the offset. - Adding 'BR' to an opcode indicates byte-reversed load or store, or the order opposite - of the expected endian order. If 'BR' is used then zero extend is assumed. + Adding 'Z' to the opcode for a load indicates zero extend; if omitted it is sign extend. + Adding 'U' to a load or store indicates an update of the base register with the offset. + Adding 'BR' to an opcode indicates byte-reversed load or store, or the order opposite + of the expected endian order. If 'BR' is used then zero extend is assumed. - Memory references n(Ra) indicate the address in Ra + n. When used with an update form - of an opcode, the value in Ra is incremented by n. + Memory references n(Ra) indicate the address in Ra + n. When used with an update form + of an opcode, the value in Ra is incremented by n. - Memory references (Ra+Rb) or (Ra)(Rb) indicate the address Ra + Rb, used by indexed - loads or stores. Both forms are accepted. When used with an update then the base register - is updated by the value in the index register. + Memory references (Ra+Rb) or (Ra)(Rb) indicate the address Ra + Rb, used by indexed + loads or stores. Both forms are accepted. When used with an update then the base register + is updated by the value in the index register. 
- Examples: - MOVD (R3), R4 <=> ld r4,0(r3) - MOVW (R3), R4 <=> lwa r4,0(r3) - MOVWZU 4(R3), R4 <=> lwzu r4,4(r3) - MOVWZ (R3+R5), R4 <=> lwzx r4,r3,r5 - MOVHZ (R3), R4 <=> lhz r4,0(r3) - MOVHU 2(R3), R4 <=> lhau r4,2(r3) - MOVBZ (R3), R4 <=> lbz r4,0(r3) + Examples: + MOVD (R3), R4 <=> ld r4,0(r3) + MOVW (R3), R4 <=> lwa r4,0(r3) + MOVWZU 4(R3), R4 <=> lwzu r4,4(r3) + MOVWZ (R3+R5), R4 <=> lwzx r4,r3,r5 + MOVHZ (R3), R4 <=> lhz r4,0(r3) + MOVHU 2(R3), R4 <=> lhau r4,2(r3) + MOVBZ (R3), R4 <=> lbz r4,0(r3) - MOVD R4,(R3) <=> std r4,0(r3) - MOVW R4,(R3) <=> stw r4,0(r3) - MOVW R4,(R3+R5) <=> stwx r4,r3,r5 - MOVWU R4,4(R3) <=> stwu r4,4(r3) - MOVH R4,2(R3) <=> sth r4,2(r3) - MOVBU R4,(R3)(R5) <=> stbux r4,r3,r5 + MOVD R4,(R3) <=> std r4,0(r3) + MOVW R4,(R3) <=> stw r4,0(r3) + MOVW R4,(R3+R5) <=> stwx r4,r3,r5 + MOVWU R4,4(R3) <=> stwu r4,4(r3) + MOVH R4,2(R3) <=> sth r4,2(r3) + MOVBU R4,(R3)(R5) <=> stbux r4,r3,r5 4. Compares - When an instruction does a compare or other operation that might - result in a condition code, then the resulting condition is set - in a field of the condition register. The condition register consists - of 8 4-bit fields named CR0 - CR7. When a compare instruction - identifies a CR then the resulting condition is set in that field - to be read by a later branch or isel instruction. Within these fields, - bits are set to indicate less than, greater than, or equal conditions. + When an instruction does a compare or other operation that might + result in a condition code, then the resulting condition is set + in a field of the condition register. The condition register consists + of 8 4-bit fields named CR0 - CR7. When a compare instruction + identifies a CR then the resulting condition is set in that field + to be read by a later branch or isel instruction. Within these fields, + bits are set to indicate less than, greater than, or equal conditions. - Once an instruction sets a condition, then a subsequent branch, isel or - other instruction can read the condition field and operate based on the - bit settings. + Once an instruction sets a condition, then a subsequent branch, isel or + other instruction can read the condition field and operate based on the + bit settings. - Examples: - CMP R3, R4 <=> cmp r3, r4 (CR0 assumed) - CMP R3, R4, CR1 <=> cmp cr1, r3, r4 + Examples: + CMP R3, R4 <=> cmp r3, r4 (CR0 assumed) + CMP R3, R4, CR1 <=> cmp cr1, r3, r4 - Note that the condition register is the target operand of compare opcodes, so - the remaining operands are in the same order for Go asm and PPC64 asm. - When CR0 is used then it is implicit and does not need to be specified. + Note that the condition register is the target operand of compare opcodes, so + the remaining operands are in the same order for Go asm and PPC64 asm. + When CR0 is used then it is implicit and does not need to be specified. 5. Branches - Many branches are represented as a form of the BC instruction. There are - other extended opcodes to make it easier to see what type of branch is being - used. + Many branches are represented as a form of the BC instruction. There are + other extended opcodes to make it easier to see what type of branch is being + used. - The following is a brief description of the BC instruction and its commonly - used operands. + The following is a brief description of the BC instruction and its commonly + used operands. 
- BC op1, op2, op3 + BC op1, op2, op3 - op1: type of branch - 16 -> bctr (branch on ctr) - 12 -> bcr (branch if cr bit is set) - 8 -> bcr+bctr (branch on ctr and cr values) - 4 -> bcr != 0 (branch if specified cr bit is not set) + op1: type of branch + 16 -> bctr (branch on ctr) + 12 -> bcr (branch if cr bit is set) + 8 -> bcr+bctr (branch on ctr and cr values) + 4 -> bcr != 0 (branch if specified cr bit is not set) - There are more combinations but these are the most common. + There are more combinations but these are the most common. - op2: condition register field and condition bit + op2: condition register field and condition bit - This contains an immediate value indicating which condition field - to read and what bits to test. Each field is 4 bits long with CR0 - at bit 0, CR1 at bit 4, etc. The value is computed as 4*CR+condition - with these condition values: + This contains an immediate value indicating which condition field + to read and what bits to test. Each field is 4 bits long with CR0 + at bit 0, CR1 at bit 4, etc. The value is computed as 4*CR+condition + with these condition values: - 0 -> LT - 1 -> GT - 2 -> EQ - 3 -> OVG + 0 -> LT + 1 -> GT + 2 -> EQ + 3 -> OVG - Thus 0 means test CR0 for LT, 5 means CR1 for GT, 30 means CR7 for EQ. + Thus 0 means test CR0 for LT, 5 means CR1 for GT, 30 means CR7 for EQ. - op3: branch target + op3: branch target - Examples: + Examples: - BC 12, 0, target <=> blt cr0, target - BC 12, 2, target <=> beq cr0, target - BC 12, 5, target <=> bgt cr1, target - BC 12, 30, target <=> beq cr7, target - BC 4, 6, target <=> bne cr1, target - BC 4, 1, target <=> ble cr1, target + BC 12, 0, target <=> blt cr0, target + BC 12, 2, target <=> beq cr0, target + BC 12, 5, target <=> bgt cr1, target + BC 12, 30, target <=> beq cr7, target + BC 4, 6, target <=> bne cr1, target + BC 4, 1, target <=> ble cr1, target - The following extended opcodes are available for ease of use and readability: + The following extended opcodes are available for ease of use and readability: - BNE CR2, target <=> bne cr2, target - BEQ CR4, target <=> beq cr4, target - BLT target <=> blt target (cr0 default) - BGE CR7, target <=> bge cr7, target + BNE CR2, target <=> bne cr2, target + BEQ CR4, target <=> beq cr4, target + BLT target <=> blt target (cr0 default) + BGE CR7, target <=> bge cr7, target - Refer to the ISA for more information on additional values for the BC instruction, - how to handle OVG information, and much more. + Refer to the ISA for more information on additional values for the BC instruction, + how to handle OVG information, and much more. 5. Align directive - Starting with Go 1.12, Go asm supports the PCALIGN directive, which indicates - that the next instruction should be aligned to the specified value. Currently - 8 and 16 are the only supported values, and a maximum of 2 NOPs will be added - to align the code. That means in the case where the code is aligned to 4 but - PCALIGN $16 is at that location, the code will only be aligned to 8 to avoid - adding 3 NOPs. + Starting with Go 1.12, Go asm supports the PCALIGN directive, which indicates + that the next instruction should be aligned to the specified value. Currently + 8 and 16 are the only supported values, and a maximum of 2 NOPs will be added + to align the code. That means in the case where the code is aligned to 4 but + PCALIGN $16 is at that location, the code will only be aligned to 8 to avoid + adding 3 NOPs. 
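As a side note on the BC encoding quoted above: op2 is pure arithmetic (4*CR + condition, with LT=0, GT=1, EQ=2, OVG=3), so the worked values 0, 5, and 30 can be checked mechanically. A small self-contained sketch (names are illustrative only, not from the assembler):

package main

import "fmt"

// Condition bit positions inside one CR field, per the table quoted above.
var condName = map[int]string{0: "LT", 1: "GT", 2: "EQ", 3: "OVG"}

// bcOp2 composes the BC op2 operand from a CR field number and a condition.
func bcOp2(cr, cond int) int { return 4*cr + cond }

func main() {
	for _, op2 := range []int{0, 5, 30} {
		fmt.Printf("op2=%2d -> CR%d %s\n", op2, op2/4, condName[op2%4])
	}
	fmt.Println(bcOp2(7, 2)) // 30, i.e. CR7 EQ, matching 'BC 12, 30, target'
}

This prints CR0 LT, CR1 GT, and CR7 EQ for 0, 5, and 30, matching the extended-opcode examples above.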
- The purpose of this directive is to improve performance for cases like loops - where better alignment (8 or 16 instead of 4) might be helpful. This directive - exists in PPC64 assembler and is frequently used by PPC64 assembler writers. + The purpose of this directive is to improve performance for cases like loops + where better alignment (8 or 16 instead of 4) might be helpful. This directive + exists in PPC64 assembler and is frequently used by PPC64 assembler writers. - PCALIGN $16 - PCALIGN $8 + PCALIGN $16 + PCALIGN $8 - Functions in Go are aligned to 16 bytes, as is the case in all other compilers - for PPC64. + Functions in Go are aligned to 16 bytes, as is the case in all other compilers + for PPC64. 6. Shift instructions - The simple scalar shifts on PPC64 expect a shift count that fits in 5 bits for - 32-bit values or 6 bit for 64-bit values. If the shift count is a constant value - greater than the max then the assembler sets it to the max for that size (31 for - 32 bit values, 63 for 64 bit values). If the shift count is in a register, then - only the low 5 or 6 bits of the register will be used as the shift count. The - Go compiler will add appropriate code to compare the shift value to achieve the - the correct result, and the assembler does not add extra checking. + The simple scalar shifts on PPC64 expect a shift count that fits in 5 bits for + 32-bit values or 6 bit for 64-bit values. If the shift count is a constant value + greater than the max then the assembler sets it to the max for that size (31 for + 32 bit values, 63 for 64 bit values). If the shift count is in a register, then + only the low 5 or 6 bits of the register will be used as the shift count. The + Go compiler will add appropriate code to compare the shift value to achieve the + the correct result, and the assembler does not add extra checking. - Examples: + Examples: - SRAD $8,R3,R4 => sradi r4,r3,8 - SRD $8,R3,R4 => rldicl r4,r3,56,8 - SLD $8,R3,R4 => rldicr r4,r3,8,55 - SRAW $16,R4,R5 => srawi r5,r4,16 - SRW $40,R4,R5 => rlwinm r5,r4,0,0,31 - SLW $12,R4,R5 => rlwinm r5,r4,12,0,19 + SRAD $8,R3,R4 => sradi r4,r3,8 + SRD $8,R3,R4 => rldicl r4,r3,56,8 + SLD $8,R3,R4 => rldicr r4,r3,8,55 + SRAW $16,R4,R5 => srawi r5,r4,16 + SRW $40,R4,R5 => rlwinm r5,r4,0,0,31 + SLW $12,R4,R5 => rlwinm r5,r4,12,0,19 - Some non-simple shifts have operands in the Go assembly which don't map directly - onto operands in the PPC64 assembly. When an operand in a shift instruction in the - Go assembly is a bit mask, that mask is represented as a start and end bit in the - PPC64 assembly instead of a mask. See the ISA for more detail on these types of shifts. - Here are a few examples: + Some non-simple shifts have operands in the Go assembly which don't map directly + onto operands in the PPC64 assembly. When an operand in a shift instruction in the + Go assembly is a bit mask, that mask is represented as a start and end bit in the + PPC64 assembly instead of a mask. See the ISA for more detail on these types of shifts. + Here are a few examples: - RLWMI $7,R3,$65535,R6 => rlwimi r6,r3,7,16,31 - RLDMI $0,R4,$7,R6 => rldimi r6,r4,0,61 + RLWMI $7,R3,$65535,R6 => rlwimi r6,r3,7,16,31 + RLDMI $0,R4,$7,R6 => rldimi r6,r4,0,61 - More recently, Go opcodes were added which map directly onto the PPC64 opcodes. It is - recommended to use the newer opcodes to avoid confusion. + More recently, Go opcodes were added which map directly onto the PPC64 opcodes. It is + recommended to use the newer opcodes to avoid confusion. 
- RLDICL $0,R4,$15,R6 => rldicl r6,r4,0,15 - RLDICR $0,R4,$15,R6 => rldicr r6.r4,0,15 + RLDICL $0,R4,$15,R6 => rldicl r6,r4,0,15 + RLDICR $0,R4,$15,R6 => rldicr r6.r4,0,15 -Register naming +# Register naming 1. Special register usage in Go asm - The following registers should not be modified by user Go assembler code. + The following registers should not be modified by user Go assembler code. - R0: Go code expects this register to contain the value 0. - R1: Stack pointer - R2: TOC pointer when compiled with -shared or -dynlink (a.k.a position independent code) - R13: TLS pointer - R30: g (goroutine) + R0: Go code expects this register to contain the value 0. + R1: Stack pointer + R2: TOC pointer when compiled with -shared or -dynlink (a.k.a position independent code) + R13: TLS pointer + R30: g (goroutine) - Register names: - - Rn is used for general purpose registers. (0-31) - Fn is used for floating point registers. (0-31) - Vn is used for vector registers. Slot 0 of Vn overlaps with Fn. (0-31) - VSn is used for vector-scalar registers. V0-V31 overlap with VS32-VS63. (0-63) - CTR represents the count register. - LR represents the link register. + Register names: + Rn is used for general purpose registers. (0-31) + Fn is used for floating point registers. (0-31) + Vn is used for vector registers. Slot 0 of Vn overlaps with Fn. (0-31) + VSn is used for vector-scalar registers. V0-V31 overlap with VS32-VS63. (0-63) + CTR represents the count register. + LR represents the link register. */ package ppc64 diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/list9.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/list9.go index 451333367..b4057d18a 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/list9.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/list9.go @@ -30,8 +30,9 @@ package ppc64 import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) func init() { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/obj9.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/obj9.go index 7997b1942..5b8bf4d02 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/obj9.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/obj9.go @@ -983,6 +983,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { /* // instruction scheduling + if(debug['Q'] == 0) return; diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go index 63a4e9e9d..776c851bf 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go @@ -270,13 +270,13 @@ const ( // RISC-V mnemonics, as defined in the "opcodes" and "opcodes-pseudo" files // from: // -// https://github.com/riscv/riscv-opcodes +// https://github.com/riscv/riscv-opcodes // // As well as some pseudo-mnemonics (e.g. MOV) used only in the assembler. // // See also "The RISC-V Instruction Set Manual" at: // -// https://riscv.org/specifications/ +// https://riscv.org/specifications/ // // If you modify this table, you MUST run 'go generate' to regenerate anames.go! 
const ( diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go index a9ca000f5..78e3dae42 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go @@ -21,10 +21,11 @@ package riscv import ( + "fmt" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" - "fmt" ) func buildop(ctxt *obj.Link) {} @@ -477,8 +478,7 @@ func setPCs(p *obj.Prog, pc int64) { // A nicer version of this diagram can be found on slide 21 of the presentation // attached to: // -// https://golang.org/issue/16922#issuecomment-243748180 -// +// https://golang.org/issue/16922#issuecomment-243748180 func stackOffset(a *obj.Addr, stacksize int64) { switch a.Name { case obj.NAME_AUTO: diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go index f436521fe..0e475deea 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go @@ -30,12 +30,13 @@ package s390x import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" ) // ctxtz holds state while assembling a single function. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go index 21ff0f56e..d73a30348 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go @@ -30,8 +30,9 @@ package s390x import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) func init() { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go index a9bd32aef..c298de93b 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go @@ -30,10 +30,11 @@ package s390x import ( + "math" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/sys" - "math" ) func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go index f0bcce58c..f361ca055 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go @@ -32,12 +32,13 @@ package obj import ( - "github.com/twitchyliquid64/golang-asm/goobj" - "github.com/twitchyliquid64/golang-asm/objabi" "fmt" "log" "math" "sort" + + "github.com/twitchyliquid64/golang-asm/goobj" + "github.com/twitchyliquid64/golang-asm/objabi" ) func Linknew(arch *LinkArch) *Link { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/util.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/util.go index 9d3030a68..e8c771fcb 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/util.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/util.go @@ -6,10 +6,11 @@ package obj import ( "bytes" - 
"github.com/twitchyliquid64/golang-asm/objabi" "fmt" "io" "strings" + + "github.com/twitchyliquid64/golang-asm/objabi" ) const REG_NONE = 0 diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go index f7e4e33a7..ccf02c072 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go @@ -6,13 +6,14 @@ package wasm import ( "bytes" - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/sys" "encoding/binary" "fmt" "io" "math" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/sys" ) var Register = map[string]int16{ diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/asm6.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/asm6.go index 8b7b9e9fa..9d347d646 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/asm6.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/asm6.go @@ -31,13 +31,14 @@ package x86 import ( - "github.com/twitchyliquid64/golang-asm/obj" - "github.com/twitchyliquid64/golang-asm/objabi" - "github.com/twitchyliquid64/golang-asm/sys" "encoding/binary" "fmt" "log" "strings" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" + "github.com/twitchyliquid64/golang-asm/sys" ) var ( @@ -59,7 +60,6 @@ var ( // is very slight but negative, so the alignment is disabled by // setting MaxLoopPad = 0. The code is here for reference and // for future experiments. -// const ( loopAlign = 16 maxLoopPad = 0 @@ -873,9 +873,9 @@ var ysha1rnds4 = []ytab{ // up in instinit. For example, oclass distinguishes the constants 0 and 1 // from the more general 8-bit constants, but instinit says // -// ycover[Yi0*Ymax+Ys32] = 1 -// ycover[Yi1*Ymax+Ys32] = 1 -// ycover[Yi8*Ymax+Ys32] = 1 +// ycover[Yi0*Ymax+Ys32] = 1 +// ycover[Yi1*Ymax+Ys32] = 1 +// ycover[Yi8*Ymax+Ys32] = 1 // // which means that Yi0, Yi1, and Yi8 all count as Ys32 (signed 32) // if that's what an instruction can handle. @@ -889,26 +889,28 @@ var ysha1rnds4 = []ytab{ // is, the Ztype) and the z bytes. // // For example, let's look at AADDL. 
The optab line says: -// {AADDL, yaddl, Px, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, +// +// {AADDL, yaddl, Px, opBytes{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, // // and yaddl says -// var yaddl = []ytab{ -// {Yi8, Ynone, Yml, Zibo_m, 2}, -// {Yi32, Ynone, Yax, Zil_, 1}, -// {Yi32, Ynone, Yml, Zilo_m, 2}, -// {Yrl, Ynone, Yml, Zr_m, 1}, -// {Yml, Ynone, Yrl, Zm_r, 1}, -// } +// +// var yaddl = []ytab{ +// {Yi8, Ynone, Yml, Zibo_m, 2}, +// {Yi32, Ynone, Yax, Zil_, 1}, +// {Yi32, Ynone, Yml, Zilo_m, 2}, +// {Yrl, Ynone, Yml, Zr_m, 1}, +// {Yml, Ynone, Yrl, Zm_r, 1}, +// } // // so there are 5 possible types of ADDL instruction that can be laid down, and // possible states used to lay them down (Ztype and z pointer, assuming z // points at opBytes{0x83, 00, 0x05,0x81, 00, 0x01, 0x03}) are: // -// Yi8, Yml -> Zibo_m, z (0x83, 00) -// Yi32, Yax -> Zil_, z+2 (0x05) -// Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00) -// Yrl, Yml -> Zr_m, z+2+1+2 (0x01) -// Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03) +// Yi8, Yml -> Zibo_m, z (0x83, 00) +// Yi32, Yax -> Zil_, z+2 (0x05) +// Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00) +// Yrl, Yml -> Zr_m, z+2+1+2 (0x01) +// Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03) // // The Pconstant in the optab line controls the prefix bytes to emit. That's // relatively straightforward as this program goes. @@ -918,7 +920,7 @@ var ysha1rnds4 = []ytab{ // encoded addressing mode for the Yml arg), and then a single immediate byte. // Zilo_m is the same but a long (32-bit) immediate. var optab = -// as, ytab, andproto, opcode +// as, ytab, andproto, opcode [...]Optab{ {obj.AXXX, nil, 0, opBytes{}}, {AAAA, ynone, P32, opBytes{0x37}}, @@ -4162,11 +4164,11 @@ func (ab *AsmBuf) asmvex(ctxt *obj.Link, rm, v, r *obj.Addr, vex, opcode uint8) // EVEX.R : 1 bit | EVEX extension bit | RxrEvex // // Examples: +// // REG_Z30 => 30 // REG_X15 => 15 // REG_R9 => 9 // REG_AX => 0 -// func regIndex(r int16) int { lower3bits := reg[r] high4bit := regrex[r] & Rxr << 1 diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/evex.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/evex.go index 47932684d..e5c082060 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/evex.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/evex.go @@ -5,10 +5,11 @@ package x86 import ( - "github.com/twitchyliquid64/golang-asm/obj" "errors" "fmt" "strings" + + "github.com/twitchyliquid64/golang-asm/obj" ) // evexBits stores EVEX prefix info that is used during instruction encoding. @@ -168,6 +169,7 @@ func evexZcase(zcase uint8) bool { // evexSuffixBits carries instruction EVEX suffix set flags. // // Examples: +// // "RU_SAE.Z" => {rounding: 3, zeroing: true} // "Z" => {zeroing: true} // "BCST" => {broadcast: true} @@ -273,10 +275,10 @@ func ParseSuffix(p *obj.Prog, cond string) error { // so we can burn some clocks to construct good error message. // // Reported issues: -// - duplicated suffixes -// - illegal rounding/SAE+broadcast combinations -// - unknown suffixes -// - misplaced suffix (e.g. wrong Z suffix position) +// - duplicated suffixes +// - illegal rounding/SAE+broadcast combinations +// - unknown suffixes +// - misplaced suffix (e.g. wrong Z suffix position) func inferSuffixError(cond string) error { suffixSet := make(map[string]bool) // Set for duplicates detection. unknownSet := make(map[string]bool) // Set of unknown suffixes. 
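The import hunks repeated through these vendored files (pcln.go, plist.go, asm9.go, listz.go, sym.go, util.go, wasmobj.go, asm6.go, evex.go, and the rest) all apply one convention: standard-library imports in a first group, a blank line, then module imports, each group sorted by path. A minimal sketch of the resulting layout, using blank imports so the snippet compiles on its own (the package name is illustrative, not from this repository):

package example

import (
	// Standard library first, sorted by path.
	_ "encoding/binary"
	_ "fmt"
	_ "log"

	// Third-party modules after one blank line, also sorted.
	_ "github.com/twitchyliquid64/golang-asm/obj"
	_ "github.com/twitchyliquid64/golang-asm/objabi"
)

Mixed groups, as in the old vendored copies, are legal Go, but goimports-style tooling keeps reshuffling them; regrouping once settles the churn for future re-vendors.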
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/list6.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/list6.go index 3da155fd4..9d95e3cb0 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/list6.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/list6.go @@ -31,8 +31,9 @@ package x86 import ( - "github.com/twitchyliquid64/golang-asm/obj" "fmt" + + "github.com/twitchyliquid64/golang-asm/obj" ) var Register = []string{ diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/obj6.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/obj6.go index 59c556421..dd8d84c12 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/obj6.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/x86/obj6.go @@ -31,12 +31,13 @@ package x86 import ( + "math" + "strings" + "github.com/twitchyliquid64/golang-asm/obj" "github.com/twitchyliquid64/golang-asm/objabi" "github.com/twitchyliquid64/golang-asm/src" "github.com/twitchyliquid64/golang-asm/sys" - "math" - "strings" ) func CanUse1InsnTLS(ctxt *obj.Link) bool { diff --git a/vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind.go b/vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind.go index 6c991121e..018e229a5 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind.go @@ -37,6 +37,7 @@ type SymKind uint8 // These are used to index into cmd/link/internal/sym/AbiSymKindToSymKind // // TODO(rsc): Give idiomatic Go names. +// //go:generate stringer -type=SymKind const ( // An otherwise invalid zero value for the type diff --git a/vendor/github.com/twitchyliquid64/golang-asm/src/pos.go b/vendor/github.com/twitchyliquid64/golang-asm/src/pos.go index b6816a56e..552514852 100644 --- a/vendor/github.com/twitchyliquid64/golang-asm/src/pos.go +++ b/vendor/github.com/twitchyliquid64/golang-asm/src/pos.go @@ -214,8 +214,10 @@ func NewFileBase(filename, absFilename string) *PosBase { } // NewLinePragmaBase returns a new *PosBase for a line directive of the form -// //line filename:line:col -// /*line filename:line:col*/ +// +// //line filename:line:col +// /*line filename:line:col*/ +// // at position pos. func NewLinePragmaBase(pos Pos, filename, absFilename string, line, col uint) *PosBase { return &PosBase{pos, filename, absFilename, FileSymPrefix + absFilename, line, col, -1} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go index 277180a01..0da99eb3a 100644 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -1,3 +1,4 @@ +//go:build codecgen.exec // +build codecgen.exec // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index de7ee72f1..e06ddf618 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -24,6 +24,7 @@ import ( "sync" "text/template" "time" + // "ugorji.net/zz" "unicode" "unicode/utf8" diff --git a/vendor/golang.org/x/arch/x86/x86asm/gnu.go b/vendor/golang.org/x/arch/x86/x86asm/gnu.go index 75cff72b0..8eba1fd0c 100644 --- a/vendor/golang.org/x/arch/x86/x86asm/gnu.go +++ b/vendor/golang.org/x/arch/x86/x86asm/gnu.go @@ -10,7 +10,7 @@ import ( ) // GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils. 
-// This general form is often called ``AT&T syntax'' as a reference to AT&T System V Unix. +// This general form is often called “AT&T syntax” as a reference to AT&T System V Unix. func GNUSyntax(inst Inst, pc uint64, symname SymLookup) string { // Rewrite instruction to mimic GNU peculiarities. // Note that inst has been passed by value and contains diff --git a/vendor/golang.org/x/arch/x86/x86asm/inst.go b/vendor/golang.org/x/arch/x86/x86asm/inst.go index 4632b5064..e98f1a841 100644 --- a/vendor/golang.org/x/arch/x86/x86asm/inst.go +++ b/vendor/golang.org/x/arch/x86/x86asm/inst.go @@ -144,7 +144,7 @@ type Arg interface { // the interface value instead of requiring an allocation. // A Reg is a single register. -// The zero Reg value has no name but indicates ``no register.'' +// The zero Reg value has no name but indicates “no register.” type Reg uint8 const ( diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd4673..c6de5d0ab 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -12,6 +12,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go index ae7d049f1..05fd305da 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go index 0f47c9ca8..dde20e507 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { // Check if we need to accumulate more events before emitting. // // We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true @@ -241,7 +240,7 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent += 2 } else { // Everything else aligns to the chosen indentation. 
- emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) } } return true diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index 268558a0d..25fe82363 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ +// +// ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// * +// +// * +// // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* +// +// ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) @@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** // +// *********** func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// // block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// // flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// // properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* +// +// ************************* +// // block_content ::= block_collection | flow_collection | SCALAR -// ****** +// +// ****** +// // flow_content ::= flow_collection | SCALAR -// ****** +// +// ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() @@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* // +// ******************** *********** * ********* func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * +// +// *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* // -// BLOCK-END -// ********* +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* // +// BLOCK-END +// ********* func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// +// ((KEY block_node_or_indentless_sequence?)? // +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev return true } -// // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * // +// *** * func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * // +// ***** * func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * // +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// +// - ***** * func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go index b7de0a89c..56af24536 100644 --- a/vendor/gopkg.in/yaml.v3/readerc.go +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index ca0070108..30b1f0892 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { // Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { // Eat '%'. start_mark := parser.mark @@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Scan the directive name. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ // +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark // Scan the value of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ +// +// %YAML 1.1 # a comment \n +// ^^^^^^ func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1797,10 +1798,11 @@ const max_number_length = 2 // Scan the version number of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { // Repeat while the next character is digit. @@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark // Scan the value of a TAG-DIRECTIVE token. // // Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte @@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t continue } if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t parser.comments = append(parser.comments, yaml_comment_t{ token_mark: token_mark, start_mark: start_mark, - line: text, + line: text, }) } return true @@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo // the foot is the line below it. 
var foot_line = -1 if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 + foot_line = parser.mark.line - parser.newlines + 1 if parser.newlines == 0 && parser.mark.column > 1 { foot_line++ } @@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo recent_empty = false // Consume until after the consumed comment line. - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go index b8a116bf9..266d0b092 100644 --- a/vendor/gopkg.in/yaml.v3/writerc.go +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index 8cec6da48..f0bedf3d6 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -17,8 +17,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml -// +// https://github.com/go-yaml/yaml package yaml import ( @@ -75,16 +74,15 @@ type Marshaler interface { // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. -// func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } @@ -185,36 +183,35 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // The field tag format accepted is: // -// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` +// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` // // The following flags are currently supported: // -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. 
+// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. // -// flow Marshal using a flow style (useful for structs, -// sequences and maps). +// flow Marshal using a flow style (useful for structs, +// sequences and maps). // -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() @@ -358,22 +355,21 @@ const ( // // For example: // -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) // -// var person Node -// err := yaml.Unmarshal(data, &person) +// Or by itself: // +// var person Node +// err := yaml.Unmarshal(data, &person) type Node struct { // Kind defines whether the node is a document, a mapping, a sequence, // a scalar value, or an alias to another node. The specific data type of // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind + Kind Kind // Style allows customizing the apperance of the node in the tree. Style Style @@ -421,7 +417,6 @@ func (n *Node) IsZero() bool { n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 } - // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 7c6d00770..ddcd5513b 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -438,7 +438,9 @@ type yaml_document_t struct { // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). +// +// yaml_parser_set_input(). +// // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. @@ -639,7 +641,6 @@ type yaml_parser_t struct { } type yaml_comment_t struct { - scan_mark yaml_mark_t // Position where scanning for comments started token_mark yaml_mark_t // Position after which tokens will be associated with this comment start_mark yaml_mark_t // Position of '#' comment mark @@ -659,13 +660,14 @@ type yaml_comment_t struct { // @a buffer to the output. 
// // @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). +// +// yaml_emitter_set_output(). +// // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. -// type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go index e88f9c54a..dea1ba961 100644 --- a/vendor/gopkg.in/yaml.v3/yamlprivateh.go +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool { func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( - // is_break: - b[i] == '\r' || // CR (#xD) + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool { func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( - // is_space: - b[i] == ' ' || + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool { func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/vendor/modules.txt b/vendor/modules.txt index 385829810..f72c1ef77 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -173,8 +173,8 @@ github.com/skycoin/skycoin/src/cipher/ripemd160 github.com/skycoin/skycoin/src/cipher/secp256k1-go github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 github.com/skycoin/skycoin/src/util/logging -# github.com/skycoin/skywire-utilities v0.0.0-20231120175000-12be4345eb26 -## explicit; go 1.17 +# github.com/skycoin/skywire-utilities v1.3.14 +## explicit; go 1.21 github.com/skycoin/skywire-utilities/pkg/buildinfo github.com/skycoin/skywire-utilities/pkg/cipher github.com/skycoin/skywire-utilities/pkg/cmdutil diff --git a/vendor/nhooyr.io/websocket/accept.go b/vendor/nhooyr.io/websocket/accept.go index 479138fc4..de01cd705 100644 --- a/vendor/nhooyr.io/websocket/accept.go +++ b/vendor/nhooyr.io/websocket/accept.go @@ -1,3 +1,4 @@ +//go:build !js // +build 
!js package websocket diff --git a/vendor/nhooyr.io/websocket/close_notjs.go b/vendor/nhooyr.io/websocket/close_notjs.go index c25b088f1..0cd763e1b 100644 --- a/vendor/nhooyr.io/websocket/close_notjs.go +++ b/vendor/nhooyr.io/websocket/close_notjs.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket diff --git a/vendor/nhooyr.io/websocket/compress_notjs.go b/vendor/nhooyr.io/websocket/compress_notjs.go index 809a272c3..e094513b3 100644 --- a/vendor/nhooyr.io/websocket/compress_notjs.go +++ b/vendor/nhooyr.io/websocket/compress_notjs.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket diff --git a/vendor/nhooyr.io/websocket/conn_notjs.go b/vendor/nhooyr.io/websocket/conn_notjs.go index 7ee60fbc3..11fe7310a 100644 --- a/vendor/nhooyr.io/websocket/conn_notjs.go +++ b/vendor/nhooyr.io/websocket/conn_notjs.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket diff --git a/vendor/nhooyr.io/websocket/dial.go b/vendor/nhooyr.io/websocket/dial.go index f882f122f..3583670ae 100644 --- a/vendor/nhooyr.io/websocket/dial.go +++ b/vendor/nhooyr.io/websocket/dial.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket diff --git a/vendor/nhooyr.io/websocket/doc.go b/vendor/nhooyr.io/websocket/doc.go index efa920e3b..a2b873c72 100644 --- a/vendor/nhooyr.io/websocket/doc.go +++ b/vendor/nhooyr.io/websocket/doc.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js // Package websocket implements the RFC 6455 WebSocket protocol. @@ -16,7 +17,7 @@ // // More documentation at https://nhooyr.io/websocket. // -// Wasm +// # Wasm // // The client side supports compiling to Wasm. // It wraps the WebSocket browser API. @@ -25,8 +26,8 @@ // // Some important caveats to be aware of: // -// - Accept always errors out -// - Conn.Ping is no-op -// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op -// - *http.Response from Dial is &http.Response{} with a 101 status code on success +// - Accept always errors out +// - Conn.Ping is no-op +// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op +// - *http.Response from Dial is &http.Response{} with a 101 status code on success package websocket // import "nhooyr.io/websocket" diff --git a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go index 26ffb4562..88e8f43f6 100644 --- a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go +++ b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go @@ -1,3 +1,4 @@ +//go:build js // +build js // Package wsjs implements typed access to the browser javascript WebSocket API. diff --git a/vendor/nhooyr.io/websocket/read.go b/vendor/nhooyr.io/websocket/read.go index a1efecabb..0696d4721 100644 --- a/vendor/nhooyr.io/websocket/read.go +++ b/vendor/nhooyr.io/websocket/read.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket diff --git a/vendor/nhooyr.io/websocket/write.go b/vendor/nhooyr.io/websocket/write.go index 81b9141ae..dea70c03d 100644 --- a/vendor/nhooyr.io/websocket/write.go +++ b/vendor/nhooyr.io/websocket/write.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package websocket
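The last group of hunks, across the nhooyr.io/websocket files, prepends the //go:build form to every legacy // +build line. gofmt has generated and synchronized the two forms since Go 1.17, so re-vendoring with the newer toolchain adds the new line automatically; the //go:build form also uses ordinary boolean operators (&&, ||, !) where the old form used spaces and commas. A minimal sketch of one paired constraint (hypothetical package name):

//go:build !js
// +build !js

// Package wsdemo stands in for any of the websocket files above that are
// compiled everywhere except GOOS=js.
package wsdemo

Once every supported toolchain understands //go:build, the legacy line can be dropped; keeping both is harmless and is exactly what gofmt emits here.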