diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..32ac413
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,101 @@
+# Created by https://www.toptal.com/developers/gitignore/api/go,macos,linux,windows
+# Edit at https://www.toptal.com/developers/gitignore?templates=go,macos,linux,windows
+
+### Go ###
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### macOS Patch ###
+# iCloud generated files
+*.icloud
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# End of https://www.toptal.com/developers/gitignore/api/go,macos,linux,windows
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100755
index 0000000..ab1b80b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,127 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+### Fixed
+- Use string length to ensure null character-containing strings in Go/JS are not terminated early.
+- Object.Set with an empty key string is now supported
+
+## [v0.7.0] - 2021-12-09
+
+### Added
+- Support for calling constructors functions with NewInstance on Function
+- Access "this" from function callback
+- value.SameValue(otherValue) function to compare values for sameness
+- Undefined, Null functions to get these constant values for the isolate
+- Support for calling a method on an object.
+- Support for calling `IsExecutionTerminating` on isolate to check if execution is still terminating.
+- Support for setting and getting internal fields for template object instances
+- Support for CPU profiling
+- Add V8 build for Apple Silicon
+- Add support for throwing an exception directly via the isolate's ThrowException function.
+- Support for compiling a context-dependent UnboundScript which can be run in any context of the isolate it was compiled in.
+- Support for creating a code cache from an UnboundScript which can be used to create an UnboundScript in other isolates
+to run a pre-compiled script in new contexts.
+- Included compile error location in `%+v` formatting of JSError
+- Enable i18n support
+
+### Changed
+- Removed error return value from NewIsolate which never fails
+- Removed error return value from NewContext which never fails
+- Removed error return value from Context.Isolate() which never fails
+- Removed error return value from NewObjectTemplate and NewFunctionTemplate. Panic if given a nil argument.
+- Function Call accepts receiver as first argument. This **subtle breaking change** will compile old code but interpret the first argument as the receiver. Use `Undefined` to prepend an argument to fix old Call use.
+- Removed Windows support until its build issues are addressed.
+- Upgrade to V8 9.6.180.12
+
+### Fixed
+- Add some missing error propagation
+- Fix crash from template finalizer releasing V8 data, let it be disposed with the isolate
+- Fix crash by keeping alive the template while its C++ pointer is still being used
+- Fix crash from accessing function template callbacks outside of `RunScript`, such as in `JSONStringify`
+
+## [v0.6.0] - 2021-05-11
+
+### Added
+- Promise resolver and promise result
+- Convert a Value to a Function and invoke it. Thanks to [@robfig](https://github.com/robfig)
+- Windows static binary. Thanks to [@cleiner](https://github.com/cleiner)
+- Setting/unsetting of V8 feature flags
+- Register promise callbacks in Go. Thanks to [@robfig](https://github.com/robfig)
+- Get Function from a template for a given context. Thanks to [@robfig](https://github.com/robfig)
+
+### Changed
+- Upgrade to V8 9.0.257.18
+
+### Fixed
+- Go GC attempting to free C memory (via finalizer) of values after an Isolate is disposed causes a panic
+
+## [v0.5.1] - 2021-02-19
+
+### Fixed
+- Memory being held by Values after the associated Context is closed
+
+## [v0.5.0] - 2021-02-08
+
+### Added
+- Support for the BigInt value to the big.Int Go type
+- Create Object Templates with primitive values, including other Object Templates
+- Configure Object Template as the global object of any new Context
+- Function Templates with callbacks to Go
+- Value to Object type, including Get/Set/Has/Delete methods
+- Get Global Object from the Context
+- Convert an Object Template to an instance of an Object
+
+### Changed
+- NewContext() API has been improved to handle optional global object, as well as optional Isolate
+- Package error messages are now prefixed with `v8go` rather than the struct name
+- Deprecated `iso.Close()` in favor of `iso.Dispose()` to keep consistency with the C++ API
+- Upgraded V8 to 8.8.278.14
+- Licence BSD 3-Clause (same as V8 and Go)
+
+## [v0.4.0] - 2021-01-14
+
+### Added
+- Value methods for checking value kind (is string, number, array etc)
+- C formatting via `clang-format` to aid future development
+- Support of vendoring with `go mod vendor`
+- Value methods to convert to primitive data types
+
+### Changed
+- Use g++ (default for cgo) for linux builds of the static v8 lib
+
+## [v0.3.0] - 2020-12-18
+
+### Added
+- Support for Windows via [MSYS2](https://www.msys2.org/). Thanks to [@neptoess](https://github.com/neptoess)
+
+### Changed
+- Upgraded V8 to 8.7.220.31
+
+## [v0.2.0] - 2020-01-25
+
+### Added
+- Manually dispose of the isolate when required
+- Monitor isolate heap statistics. Thanks to [@mehrdadrad](https://github.com/mehrdadrad)
+
+### Changed
+- Upgrade V8 to 8.0.426.15
+
+## [v0.1.0] - 2019-09-22
+
+### Changed
+- Upgrade V8 to 7.7.299.9
+
+## [v0.0.1] - 2019-09-20
+
+### Added
+- Create V8 Isolate
+- Create Contexts
+- Run JavaScript scripts
+- Get Values back from JavaScript in Go
+- Get detailed JavaScript errors in Go, including stack traces
+- Terminate long running scripts from any Goroutine
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100755
index 0000000..fdb9396
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,21 @@
+# How to contribute
+
+**Working on your first Pull Request?** You can learn how from this *free* series [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github)
+
+## Guidelines for Pull Requests
+
+How to get your contributions merged smoothly and quickly.
+
+* Create **small PRs** that are narrowly focused on **addressing a single concern**. We oftentimes receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & reviewer's time is wasted. Create more PRs to address different concerns and everyone will be happy.
+
+* For speculative changes, consider opening an issue and discussing it first.
+
+* Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists.
+
+* Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity.
+
+* Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/main` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review).
+
+* Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
+
+* Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/LICENSE b/LICENSE
new file mode 100755
index 0000000..79cf04b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019 Roger Chapman and the v8go contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000..a109f2c
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,32 @@
+.DEFAULT_GOAL := build
+
+GO ?= go
+GO_RUN_TOOLS ?= $(GO) run -modfile ./tools/go.mod
+GO_TEST = $(GO_RUN_TOOLS) gotest.tools/gotestsum --format pkgname
+
+
+.PHONY: generate
+generate:
+ go generate ./...
+
+.PHONY: fmt
+fmt: ## Run go fmt against code.
+ go run mvdan.cc/gofumpt -w .
+
+.PHONY: vet
+vet: ## Run go vet against code.
+ go vet ./...
+
+.PHONY: test
+test: fmt vet ## Run tests.
+ mkdir -p .test/reports
+ $(GO_TEST) --junitfile .test/reports/unit-test.xml -- -race ./... -count=1 -short -cover -coverprofile .test/reports/unit-test-coverage.out
+
+.PHONY: lint
+lint: ## Run lint.
+ $(GO_RUN_TOOLS) github.com/golangci/golangci-lint/cmd/golangci-lint run --timeout 5m -c .golangci.yml
+
+.PHONY: clean
+clean: ## Remove previous build.
+ find . -type f -name '*.gen.go' -exec rm {} +
+ git checkout go.mod
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100755
index 0000000..0c4671f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,296 @@
+# Execute JavaScript from Go
+
+
+
+## Usage
+
+```go
+import v8 "github.com/nzhenev/v8go/v8go"
+```
+
+### Running a script
+
+```go
+ctx := v8.NewContext() // creates a new V8 context with a new Isolate aka VM
+ctx.RunScript("const add = (a, b) => a + b", "math.js") // executes a script on the global context
+ctx.RunScript("const result = add(3, 4)", "main.js") // any functions previously added to the context can be called
+val, _ := ctx.RunScript("result", "value.js") // return a value in JavaScript back to Go
+fmt.Printf("addition result: %s", val)
+```
+
+### One VM, many contexts
+
+```go
+iso := v8.NewIsolate() // creates a new JavaScript VM
+ctx1 := v8.NewContext(iso) // new context within the VM
+ctx1.RunScript("const multiply = (a, b) => a * b", "math.js")
+
+ctx2 := v8.NewContext(iso) // another context on the same VM
+if _, err := ctx2.RunScript("multiply(3, 4)", "main.js"); err != nil {
+ // this will error as multiply is not defined in this context
+}
+```
+
+### JavaScript function with Go callback
+
+```go
+iso := v8.NewIsolate() // create a new VM
+// a template that represents a JS function
+printfn := v8.NewFunctionTemplate(iso, func(info *v8.FunctionCallbackInfo) *v8.Value {
+ fmt.Printf("%v", info.Args()) // when the JS function is called this Go callback will execute
+ return nil // you can return a value back to the JS caller if required
+})
+global := v8.NewObjectTemplate(iso) // a template that represents a JS Object
+global.Set("print", printfn) // sets the "print" property of the Object to our function
+ctx := v8.NewContext(iso, global) // new Context with the global Object set to our object template
+ctx.RunScript("print('foo')", "print.js") // will execute the Go callback with a single argument 'foo'
+```
+
+### Update a JavaScript object from Go
+
+```go
+ctx := v8.NewContext() // new context with a default VM
+obj := ctx.Global() // get the global object from the context
+obj.Set("version", "v1.0.0") // set the property "version" on the object
+val, _ := ctx.RunScript("version", "version.js") // global object will have the property set within the JS VM
+fmt.Printf("version: %s", val)
+
+if obj.Has("version") { // check if a property exists on the object
+ obj.Delete("version") // remove the property from the object
+}
+```
+
+### JavaScript errors
+
+```go
+val, err := ctx.RunScript(src, filename)
+if err != nil {
+ e := err.(*v8.JSError) // JavaScript errors will be returned as the JSError struct
+ fmt.Println(e.Message) // the message of the exception thrown
+  fmt.Println(e.Location)  // the filename, line number and the column where the error occurred
+ fmt.Println(e.StackTrace) // the full stack trace of the error, if available
+
+ fmt.Printf("javascript error: %v", e) // will format the standard error message
+ fmt.Printf("javascript stack trace: %+v", e) // will format the full error stack trace
+}
+```
+
+### Pre-compile context-independent scripts to speed-up execution times
+
+For scripts that are large or are repeatedly run in different contexts,
+it is beneficial to compile the script once and use the cached data from that
+compilation to avoid recompiling every time you want to run it.
+
+```go
+source := "const multiply = (a, b) => a * b"
+iso1 := v8.NewIsolate() // creates a new JavaScript VM
+ctx1 := v8.NewContext(iso1) // new context within the VM
+script1, _ := iso1.CompileUnboundScript(source, "math.js", v8.CompileOptions{}) // compile script to get cached data
+val, _ := script1.Run(ctx1)
+
+cachedData := script1.CreateCodeCache()
+
+iso2 := v8.NewIsolate() // create a new JavaScript VM
+ctx2 := v8.NewContext(iso2) // new context within the VM
+
+script2, _ := iso2.CompileUnboundScript(source, "math.js", v8.CompileOptions{CachedData: cachedData}) // compile script in new isolate with cached data
+val, _ = script2.Run(ctx2)
+```
+
+### Terminate long running scripts
+
+```go
+vals := make(chan *v8.Value, 1)
+errs := make(chan error, 1)
+
+go func() {
+ val, err := ctx.RunScript(script, "forever.js") // exec a long running script
+ if err != nil {
+ errs <- err
+ return
+ }
+ vals <- val
+}()
+
+select {
+case val := <- vals:
+ // success
+case err := <- errs:
+ // javascript error
+case <- time.After(200 * time.Millisecond):
+ vm := ctx.Isolate() // get the Isolate from the context
+ vm.TerminateExecution() // terminate the execution
+ err := <- errs // will get a termination error back from the running script
+}
+```
+
+### CPU Profiler
+
+```go
+func createProfile() {
+ iso := v8.NewIsolate()
+ ctx := v8.NewContext(iso)
+ cpuProfiler := v8.NewCPUProfiler(iso)
+
+ cpuProfiler.StartProfiling("my-profile")
+
+    ctx.RunScript(profileScript, "script.js") // this script is defined in cpuprofiler_test.go
+ val, _ := ctx.Global().Get("start")
+ fn, _ := val.AsFunction()
+ fn.Call(ctx.Global())
+
+ cpuProfile := cpuProfiler.StopProfiling("my-profile")
+
+    printTree("", cpuProfile.GetTopDownRoot()) // helper function to print the profile
+}
+
+func printTree(nest string, node *v8.CPUProfileNode) {
+ fmt.Printf("%s%s %s:%d:%d\n", nest, node.GetFunctionName(), node.GetScriptResourceName(), node.GetLineNumber(), node.GetColumnNumber())
+ count := node.GetChildrenCount()
+ if count == 0 {
+ return
+ }
+ nest = fmt.Sprintf("%s ", nest)
+ for i := 0; i < count; i++ {
+ printTree(nest, node.GetChild(i))
+ }
+}
+
+// Output
+// (root) :0:0
+// (program) :0:0
+// start script.js:23:15
+// foo script.js:15:13
+// delay script.js:12:15
+// loop script.js:1:14
+// bar script.js:13:13
+// delay script.js:12:15
+// loop script.js:1:14
+// baz script.js:14:13
+// delay script.js:12:15
+// loop script.js:1:14
+// (garbage collector) :0:0
+```
+
+## Documentation
+
+Go Reference & more examples: https://pkg.go.dev/ionos-cloud/v8go
+
+### Support
+
+If you would like to ask questions about this library or want to keep up-to-date with the latest changes and releases,
+please join the [**#v8go**](https://gophers.slack.com/channels/v8go) channel on Gophers Slack. [Click here to join the Gophers Slack community!](https://invite.slack.golangbridge.org/)
+
+### Windows
+
+There used to be Windows binary support. For further information see, [PR #234](https://github.com/nzhenev/v8go/v8go/pull/234).
+
+The v8go library would welcome contributions from anyone able to get an external windows
+build of the V8 library linking with v8go, using the version of V8 checked out in the
+`deps/v8` git submodule, and documentation of the process involved. This process will likely
+involve passing a linker flag when building v8go (e.g. using the `CGO_LDFLAGS` environment
+variable).
+
+## V8 dependency
+
+V8 version: **9.0.257.18** (April 2021)
+
+In order to make `v8go` usable as a standard Go package, prebuilt static libraries of V8
+are included for Linux and macOS. You *should not* need to build V8 yourself.
+
+Due to security concerns of binary blobs hiding malicious code, the V8 binary is built via CI *ONLY*.
+
+## Project Goals
+
+To provide a high quality, idiomatic, Go binding to the [V8 C++ API](https://v8.github.io/api/head/index.html).
+
+The API should match the original API as closely as possible, but with an API that Gophers (Go enthusiasts) expect. For
+example: using multiple return values to return both result and error from a function, rather than throwing an
+exception.
+
+This project also aims to keep up-to-date with the latest (stable) release of V8.
+
+## License
+
+[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B22862%2Fgit%40github.com%3Aionos-cloud%2Fv8go.git.svg?type=large)](https://app.fossa.com/projects/custom%2B22862%2Fgit%40github.com%3Aionos-cloud%2Fv8go.git?ref=badge_large)
+
+## Development
+
+### Recompile V8 with debug info and debug checks
+
+[Aside from data races, Go should be memory-safe](https://research.swtch.com/gorace) and v8go should preserve this property by adding the necessary checks to return an error or panic on these unsupported code paths. Release builds of v8go don't include debugging information for the V8 library since it significantly adds to the binary size, slows down compilation and shouldn't be needed by users of v8go. However, if a v8go bug causes a crash (e.g. during new feature development) then it can be helpful to build V8 with debugging information to get a C++ backtrace with line numbers. The following steps will not only do that, but also enable V8 debug checking, which can help with catching misuse of the V8 API.
+
+1) Make sure to clone the projects submodules (ie. the V8's `depot_tools` project): `git submodule update --init --recursive`
+1) Build the V8 binary for your OS: `deps/build.py --debug`. V8 is a large project, and building the binary can take up to 30 minutes.
+1) Build the executable to debug, using `go build` for commands or `go test -c` for tests. You may need to add the `-ldflags=-compressdwarf=false` option to disable debug information compression so this information can be read by the debugger (e.g. lldb that comes with Xcode v12.5.1, the latest Xcode released at the time of writing)
+1) Run the executable with a debugger (e.g. `lldb -- ./v8go.test -test.run TestThatIsCrashing`, `run` to start execution then use `bt` to print a backtrace after it breaks on a crash), since backtraces printed by Go or V8 don't currently include line number information.
+
+### Upgrading the V8 binaries
+
+We have the [upgradev8](https://github.com/nzhenev/v8go/v8go/.github/workflow/v8upgrade.yml) workflow.
+The workflow is triggered every day or manually.
+
+If the current [v8_version](https://github.com/nzhenev/v8go/v8go/deps/v8_version) is different from the latest stable version, the workflow takes care of fetching the latest stable v8 files and copying them into `deps/include`. The last step of the workflow opens a new PR with the branch name `v8_upgrade/` with all the changes.
+
+The next steps are:
+
+1) The build is not yet triggered automatically. To trigger it manually, go to the [V8
+Build](https://github.com/nzhenev/v8go/v8go/actions?query=workflow%3A%22V8+Build%22) Github Action, Select "Run workflow",
+and select your pushed branch eg. `v8_upgrade/`.
+1) Once built, this should open 3 PRs against your branch to add the `libv8.a` for Linux (for x86_64) and macOS for x86_64 and arm64; merge
+these PRs into your branch. You are now ready to raise the PR against `master` with the latest version of V8.
+
+### Flushing after C/C++ standard library printing for debugging
+
+When using the C/C++ standard library functions for printing (e.g. `printf`), then the output will be buffered by default.
+This can cause some confusion, especially because the test binary (created through `go test`) does not flush the buffer
+at exit (at the time of writing). When standard output is the terminal, then it will use line buffering and flush when
+a new line is printed, otherwise (e.g. if the output is redirected to a pipe or file) it will be fully buffered and not even
+flush at the end of a line. When the test binary is executed through `go test .` (e.g. instead of
+separately compiled with `go test -c` and run with `./v8go.test`) Go may redirect standard output internally, resulting in
+standard output being fully buffered.
+
+A simple way to avoid this problem is to flush the standard output stream after printing with the `fflush(stdout);` statement.
+Not relying on the flushing at exit can also help ensure the output is printed before a crash.
+
+### Local leak checking
+
+Leak checking is automatically done in CI, but it can be useful to do locally to debug leaks.
+
+Leak checking is done using the [Leak Sanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) which
+is a part of LLVM. As such, compiling with clang as the C/C++ compiler seems to produce more complete
+backtraces (unfortunately still only of the system stack at the time of writing).
+
+For instance, on a Debian-based Linux system, you can use `sudo apt-get install clang-12` to install a
+recent version of clang. Then CC and CXX environment variables are needed to use that compiler. With
+that compiler, the tests can be run as follows
+
+```
+CC=clang-12 CXX=clang++-12 go test -c --tags leakcheck && ./v8go.test
+```
+
+The separate compile and link commands are currently needed to get line numbers in the backtrace.
+
+On macOS, leak checking isn't available with the version of clang that comes with Xcode, so a separate
+compiler installation is needed. For example, with homebrew, `brew install llvm` will install a version
+of clang with support for this. The ASAN_OPTIONS environment variable will also be needed to run the code
+with leak checking enabled, since it isn't enabled by default on macOS. E.g. with the homebrew
+installation of llvm, the tests can be run with
+
+```
+CXX=/usr/local/opt/llvm/bin/clang++ CC=/usr/local/opt/llvm/bin/clang go test -c --tags leakcheck -ldflags=-compressdwarf=false
+ASAN_OPTIONS=detect_leaks=1 ./v8go.test
+```
+
+The `-ldflags=-compressdwarf=false` is currently (with clang 13) needed to get line numbers in the backtrace.
+
+### Formatting
+
+Go has `go fmt`, C has `clang-format`. Any changes to the `v8go.h|cc` should be formated with `clang-format` with the
+"Chromium" Coding style. This can be done easily by running the `go generate` command.
+
+`brew install clang-format` to install on macOS.
+
+---
+
+V8 Gopher image based on original artwork from the amazing [Renee French](http://reneefrench.blogspot.com).
diff --git a/cgo.go b/cgo.go
new file mode 100755
index 0000000..0396c6e
--- /dev/null
+++ b/cgo.go
@@ -0,0 +1,27 @@
+// Copyright 2019 Roger Chapman and the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go
+
+//go:generate clang-format -i --verbose -style=Chromium v8go.h v8go.cc
+
+// #cgo CXXFLAGS: -fno-rtti -fPIC -std=c++17 -DV8_COMPRESS_POINTERS -DV8_31BIT_SMIS_ON_64BIT_ARCH -I${SRCDIR}/deps/include -Wall -DV8_ENABLE_SANDBOX
+// #cgo LDFLAGS: -pthread -lv8
+// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/deps/darwin_x86_64
+// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/deps/darwin_arm64
+// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/deps/linux_x86_64 -ldl
+// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/deps/linux_arm64 -ldl
+import "C"
+
+// These imports forces `go mod vendor` to pull in all the folders that
+// contain V8 libraries and headers which otherwise would be ignored.
+// DO NOT REMOVE
+// nolint:revive
+import (
+ _ "github.com/nzhenev/v8go/v8go/deps/darwin_arm64"
+ _ "github.com/nzhenev/v8go/v8go/deps/darwin_x86_64"
+ _ "github.com/nzhenev/v8go/v8go/deps/include"
+ _ "github.com/nzhenev/v8go/v8go/deps/linux_arm64"
+ _ "github.com/nzhenev/v8go/v8go/deps/linux_x86_64"
+)
diff --git a/context.go b/context.go
new file mode 100755
index 0000000..33ca614
--- /dev/null
+++ b/context.go
@@ -0,0 +1,185 @@
+// Copyright 2019 Roger Chapman and the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go
+
+// #include <stdlib.h>
+// #include "v8go.h"
+import "C"
+
+import (
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// Due to the limitations of passing pointers to C from Go we need to create
+// a registry so that we can lookup the Context from any given callback from V8.
+// This is similar to what is described here: https://github.com/golang/go/wiki/cgo#function-variables
+type ctxRef struct {
+ ctx *Context
+ refCount int
+}
+
+var (
+ ctxMutex sync.RWMutex
+ ctxRegistry = make(map[int]*ctxRef)
+ ctxSeq = 0
+)
+
+// Context is a global root execution environment that allows separate,
+// unrelated, JavaScript applications to run in a single instance of V8.
+type Context struct {
+ ref int
+ ptr C.ContextPtr
+ iso *Isolate
+}
+
+type contextOptions struct {
+ iso *Isolate
+ gTmpl *ObjectTemplate
+}
+
+// ContextOption sets options such as Isolate and Global Template to the NewContext
+type ContextOption interface {
+ apply(*contextOptions)
+}
+
+// NewContext creates a new JavaScript context; if no Isolate is passed as a
+// ContextOption then a new Isolate will be created.
+func NewContext(opt ...ContextOption) *Context {
+ opts := contextOptions{}
+ for _, o := range opt {
+ if o != nil {
+ o.apply(&opts)
+ }
+ }
+
+ if opts.iso == nil {
+ opts.iso = NewIsolate()
+ }
+
+ if opts.gTmpl == nil {
+ opts.gTmpl = &ObjectTemplate{&template{}}
+ }
+
+ ctxMutex.Lock()
+ ctxSeq++
+ ref := ctxSeq
+ ctxMutex.Unlock()
+
+ ctx := &Context{
+ ref: ref,
+ ptr: C.NewContext(opts.iso.ptr, opts.gTmpl.ptr, C.int(ref)),
+ iso: opts.iso,
+ }
+ ctx.register()
+ runtime.KeepAlive(opts.gTmpl)
+ return ctx
+}
+
+// Isolate gets the current context's parent isolate.
+func (c *Context) Isolate() *Isolate {
+ return c.iso
+}
+
+func (c *Context) RetainedValueCount() int {
+ ctxMutex.Lock()
+ defer ctxMutex.Unlock()
+ return int(C.ContextRetainedValueCount(c.ptr))
+}
+
+// RunScript executes the source JavaScript; origin (a.k.a. filename) provides a
+// reference for the script and used in the stack trace if there is an error.
+// error will be of type `JSError` if not nil.
+func (c *Context) RunScript(source string, origin string) (*Value, error) {
+ cSource := C.CString(source)
+ cOrigin := C.CString(origin)
+ defer C.free(unsafe.Pointer(cSource))
+ defer C.free(unsafe.Pointer(cOrigin))
+
+ rtn := C.RunScript(c.ptr, cSource, cOrigin)
+ return valueResult(c, rtn)
+}
+
+// Global returns the global proxy object.
+// Global proxy object is a thin wrapper whose prototype points to actual
+// context's global object with the properties like Object, etc. This is
+// done that way for security reasons.
+// Please note that changes to global proxy object prototype most probably
+// would break the VM — V8 expects only global object as a prototype of
+// global proxy object.
+func (c *Context) Global() *Object {
+ valPtr := C.ContextGlobal(c.ptr)
+ v := &Value{valPtr, c}
+ return &Object{v}
+}
+
+// PerformMicrotaskCheckpoint runs the default MicrotaskQueue until empty.
+// This is used to make progress on Promises.
+func (c *Context) PerformMicrotaskCheckpoint() {
+ C.IsolatePerformMicrotaskCheckpoint(c.iso.ptr)
+}
+
+// Close will dispose the context and free the memory.
+// Access to any values associated with the context after calling Close may panic.
+func (c *Context) Close() {
+ c.deregister()
+ C.ContextFree(c.ptr)
+ c.ptr = nil
+}
+
+func (c *Context) register() {
+ ctxMutex.Lock()
+ r := ctxRegistry[c.ref]
+ if r == nil {
+ r = &ctxRef{ctx: c}
+ ctxRegistry[c.ref] = r
+ }
+ r.refCount++
+ ctxMutex.Unlock()
+}
+
+func (c *Context) deregister() {
+ ctxMutex.Lock()
+ defer ctxMutex.Unlock()
+ r := ctxRegistry[c.ref]
+ if r == nil {
+ return
+ }
+ r.refCount--
+ if r.refCount <= 0 {
+ delete(ctxRegistry, c.ref)
+ }
+}
+
+func getContext(ref int) *Context {
+ ctxMutex.RLock()
+ defer ctxMutex.RUnlock()
+ r := ctxRegistry[ref]
+ if r == nil {
+ return nil
+ }
+ return r.ctx
+}
+
+//export goContext
+func goContext(ref int) C.ContextPtr {
+ ctx := getContext(ref)
+ return ctx.ptr
+}
+
+func valueResult(ctx *Context, rtn C.RtnValue) (*Value, error) {
+ if rtn.value == nil {
+ return nil, newJSError(rtn.error)
+ }
+ return &Value{rtn.value, ctx}, nil
+}
+
+func objectResult(ctx *Context, rtn C.RtnValue) (*Object, error) {
+ if rtn.value == nil {
+ return nil, newJSError(rtn.error)
+ }
+ return &Object{&Value{rtn.value, ctx}}, nil
+}
diff --git a/context_test.go b/context_test.go
new file mode 100755
index 0000000..4659d34
--- /dev/null
+++ b/context_test.go
@@ -0,0 +1,210 @@
+// Copyright 2019 Roger Chapman and the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ v8 "github.com/nzhenev/v8go/v8go"
+)
+
+func TestContextExec(t *testing.T) {
+ t.Parallel()
+ ctx := v8.NewContext(nil)
+ defer ctx.Isolate().Dispose()
+ defer ctx.Close()
+
+ ctx.RunScript(`const add = (a, b) => a + b`, "add.js")
+ val, _ := ctx.RunScript(`add(3, 4)`, "main.js")
+ rtn := val.String()
+ if rtn != "7" {
+ t.Errorf("script returned an unexpected value: expected %q, got %q", "7", rtn)
+ }
+
+ _, err := ctx.RunScript(`add`, "func.js")
+ if err != nil {
+ t.Errorf("error not expected: %v", err)
+ }
+
+ iso := ctx.Isolate()
+ ctx2 := v8.NewContext(iso)
+ defer ctx2.Close()
+ _, err = ctx2.RunScript(`add`, "ctx2.js")
+ if err == nil {
+ t.Error("error expected but was <nil>")
+ }
+}
+
+func TestJSExceptions(t *testing.T) {
+ t.Parallel()
+
+ tests := [...]struct {
+ name string
+ source string
+ origin string
+ err string
+ }{
+ {"SyntaxError", "bad js syntax", "syntax.js", "SyntaxError: Unexpected identifier 'js'"},
+ {"ReferenceError", "add()", "add.js", "ReferenceError: add is not defined"},
+ }
+
+ ctx := v8.NewContext(nil)
+ defer ctx.Isolate().Dispose()
+ defer ctx.Close()
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := ctx.RunScript(tt.source, tt.origin)
+ if err == nil {
+ t.Error("error expected but got <nil>")
+ return
+ }
+ if err.Error() != tt.err {
+ t.Errorf("expected %q, got %q", tt.err, err.Error())
+ }
+ })
+ }
+}
+
+func TestContextRegistry(t *testing.T) {
+ t.Parallel()
+
+ ctx := v8.NewContext()
+ defer ctx.Isolate().Dispose()
+ defer ctx.Close()
+
+ ctxref := ctx.Ref()
+
+ c1 := v8.GetContext(ctxref)
+ if c1 == nil {
+ t.Error("expected context, but got <nil>")
+ }
+ if c1 != ctx {
+ t.Errorf("contexts should match %p != %p", c1, ctx)
+ }
+
+ ctx.Close()
+
+ c2 := v8.GetContext(ctxref)
+ if c2 != nil {
+ t.Error("expected context to be <nil> after close")
+ }
+}
+
+func TestMemoryLeak(t *testing.T) {
+ t.Parallel()
+
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+
+ for i := 0; i < 6000; i++ {
+ ctx := v8.NewContext(iso)
+ _ = ctx.Global()
+ // _ = obj.String()
+ _, _ = ctx.RunScript("2", "")
+ ctx.Close()
+ }
+ if n := iso.GetHeapStatistics().NumberOfNativeContexts; n >= 6000 {
+ t.Errorf("Context not being GC'd, got %d native contexts", n)
+ }
+}
+
+// https://github.com/rogchap/v8go/issues/186
+func TestRegistryFromJSON(t *testing.T) {
+ t.Parallel()
+
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+
+ global := v8.NewObjectTemplate(iso)
+ err := global.Set("location", v8.NewFunctionTemplate(iso, func(info *v8.FunctionCallbackInfo) *v8.Value {
+ v, err := v8.NewValue(iso, "world")
+ fatalIf(t, err)
+ return v
+ }))
+ fatalIf(t, err)
+
+ ctx := v8.NewContext(iso, global)
+ defer ctx.Close()
+
+ v, err := ctx.RunScript(`
+ new Proxy({
+ "hello": "unknown"
+ }, {
+ get: function () {
+ return location()
+ },
+ })
+ `, "main.js")
+ fatalIf(t, err)
+
+ s, err := v8.JSONStringify(ctx, v)
+ fatalIf(t, err)
+
+ expected := `{"hello":"world"}`
+ if s != expected {
+ t.Fatalf("expected %q, got %q", expected, s)
+ }
+}
+
+func BenchmarkContext(b *testing.B) {
+ b.ReportAllocs()
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+ for n := 0; n < b.N; n++ {
+ ctx := v8.NewContext(iso)
+ ctx.RunScript(script, "main.js")
+ str, _ := json.Marshal(makeObject())
+ cmd := fmt.Sprintf("process(%s)", str)
+ ctx.RunScript(cmd, "cmd.js")
+ ctx.Close()
+ }
+}
+
+func ExampleContext() {
+ ctx := v8.NewContext()
+ defer ctx.Isolate().Dispose()
+ defer ctx.Close()
+ ctx.RunScript("const add = (a, b) => a + b", "math.js")
+ ctx.RunScript("const result = add(3, 4)", "main.js")
+ val, _ := ctx.RunScript("result", "value.js")
+ fmt.Println(val)
+ // Output:
+ // 7
+}
+
+func ExampleContext_isolate() {
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+ ctx1 := v8.NewContext(iso)
+ defer ctx1.Close()
+ ctx1.RunScript("const foo = 'bar'", "context_one.js")
+ val, _ := ctx1.RunScript("foo", "foo.js")
+ fmt.Println(val)
+
+ ctx2 := v8.NewContext(iso)
+ defer ctx2.Close()
+ _, err := ctx2.RunScript("foo", "context_two.js")
+ fmt.Println(err)
+ // Output:
+ // bar
+ // ReferenceError: foo is not defined
+}
+
+func ExampleContext_globalTemplate() {
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+ obj := v8.NewObjectTemplate(iso)
+ obj.Set("version", "v1.0.0")
+ ctx := v8.NewContext(iso, obj)
+ defer ctx.Close()
+ val, _ := ctx.RunScript("version", "main.js")
+ fmt.Println(val)
+ // Output:
+ // v1.0.0
+}
diff --git a/cpuprofile.go b/cpuprofile.go
new file mode 100755
index 0000000..4a1593c
--- /dev/null
+++ b/cpuprofile.go
@@ -0,0 +1,55 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go
+
+/*
+#include "v8go.h"
+*/
+import "C"
+import "time"
+
+type CPUProfile struct {
+ p *C.CPUProfile
+
+ // The CPU profile title.
+ title string
+
+ // root is the root node of the top down call tree.
+ root *CPUProfileNode
+
+ // startTimeOffset is the time when the profile recording was started
+ // since some unspecified starting point.
+ startTimeOffset time.Duration
+
+ // endTimeOffset is the time when the profile recording was stopped
+ // since some unspecified starting point.
+ // The point is equal to the starting point used by startTimeOffset.
+ endTimeOffset time.Duration
+}
+
+// Returns CPU profile title.
+func (c *CPUProfile) GetTitle() string {
+ return c.title
+}
+
+// Returns the root node of the top down call tree.
+func (c *CPUProfile) GetTopDownRoot() *CPUProfileNode {
+ return c.root
+}
+
+// Returns the duration of the profile.
+func (c *CPUProfile) GetDuration() time.Duration {
+ return c.endTimeOffset - c.startTimeOffset
+}
+
+// Deletes the profile and removes it from CpuProfiler's list.
+// All pointers to nodes previously returned become invalid.
+func (c *CPUProfile) Delete() {
+ if c.p == nil {
+ return
+ }
+ C.CPUProfileDelete(c.p)
+ c.p = nil
+}
diff --git a/cpuprofile_test.go b/cpuprofile_test.go
new file mode 100755
index 0000000..0646932
--- /dev/null
+++ b/cpuprofile_test.go
@@ -0,0 +1,70 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go_test
+
+import (
+ "testing"
+
+ v8 "github.com/nzhenev/v8go/v8go"
+)
+
+func TestCPUProfile(t *testing.T) {
+ t.Parallel()
+
+ ctx := v8.NewContext(nil)
+ iso := ctx.Isolate()
+ defer iso.Dispose()
+ defer ctx.Close()
+
+ cpuProfiler := v8.NewCPUProfiler(iso)
+ defer cpuProfiler.Dispose()
+
+ title := "cpuprofiletest"
+ cpuProfiler.StartProfiling(title)
+
+ _, err := ctx.RunScript(profileScript, "script.js")
+ fatalIf(t, err)
+ val, err := ctx.Global().Get("start")
+ fatalIf(t, err)
+ fn, err := val.AsFunction()
+ fatalIf(t, err)
+ _, err = fn.Call(ctx.Global())
+ fatalIf(t, err)
+
+ cpuProfile := cpuProfiler.StopProfiling(title)
+ defer cpuProfile.Delete()
+
+ if cpuProfile.GetTitle() != title {
+ t.Fatalf("expected title %s, but got %s", title, cpuProfile.GetTitle())
+ }
+
+ root := cpuProfile.GetTopDownRoot()
+ if root == nil {
+ t.Fatal("expected root not to be nil")
+ }
+ if root.GetFunctionName() != "(root)" {
+ t.Errorf("expected (root), but got %v", root.GetFunctionName())
+ }
+
+ if cpuProfile.GetDuration() <= 0 {
+ t.Fatalf("expected positive profile duration (%s)", cpuProfile.GetDuration())
+ }
+}
+
+func TestCPUProfile_Delete(t *testing.T) {
+ t.Parallel()
+
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+
+ cpuProfiler := v8.NewCPUProfiler(iso)
+ defer cpuProfiler.Dispose()
+
+ cpuProfiler.StartProfiling("cpuprofiletest")
+ cpuProfile := cpuProfiler.StopProfiling("cpuprofiletest")
+ cpuProfile.Delete()
+ // noop when called multiple times
+ cpuProfile.Delete()
+}
diff --git a/cpuprofilenode.go b/cpuprofilenode.go
new file mode 100755
index 0000000..6ac5b2c
--- /dev/null
+++ b/cpuprofilenode.go
@@ -0,0 +1,91 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go
+
+type CPUProfileNode struct {
+ // The id of the current node, unique within the tree.
+ nodeID int
+
+ // The id of the script where the function originates.
+ scriptID int
+
+ // The resource name for script from where the function originates.
+ scriptResourceName string
+
+ // The function name (empty string for anonymous functions.)
+ functionName string
+
+ // The number of the line where the function originates.
+ lineNumber int
+
+ // The number of the column where the function originates.
+ columnNumber int
+
+ // The count of samples where the function was currently executing.
+ hitCount int
+
+ // The bailout reason for the function if the optimization was disabled for it.
+ bailoutReason string
+
+ // The children node of this node.
+ children []*CPUProfileNode
+
+ // The parent node of this node.
+ parent *CPUProfileNode
+}
+
+// Returns node id.
+func (c *CPUProfileNode) GetNodeID() int {
+ return c.nodeID
+}
+
+// Returns id for script from where the function originates.
+func (c *CPUProfileNode) GetScriptID() int {
+ return c.scriptID
+}
+
+// Returns function name (empty string for anonymous functions.)
+func (c *CPUProfileNode) GetFunctionName() string {
+ return c.functionName
+}
+
+// Returns resource name for script from where the function originates.
+func (c *CPUProfileNode) GetScriptResourceName() string {
+ return c.scriptResourceName
+}
+
+// Returns number of the line where the function originates.
+func (c *CPUProfileNode) GetLineNumber() int {
+ return c.lineNumber
+}
+
+// Returns number of the column where the function originates.
+func (c *CPUProfileNode) GetColumnNumber() int {
+ return c.columnNumber
+}
+
+// Returns count of samples where the function was currently executing.
+func (c *CPUProfileNode) GetHitCount() int {
+ return c.hitCount
+}
+
+// Returns the bailout reason for the function if the optimization was disabled for it.
+func (c *CPUProfileNode) GetBailoutReason() string {
+ return c.bailoutReason
+}
+
+// Retrieves the ancestor node, or nil if the root.
+func (c *CPUProfileNode) GetParent() *CPUProfileNode {
+ return c.parent
+}
+
+func (c *CPUProfileNode) GetChildrenCount() int {
+ return len(c.children)
+}
+
+// Retrieves a child node by index.
+func (c *CPUProfileNode) GetChild(index int) *CPUProfileNode {
+ return c.children[index]
+}
diff --git a/cpuprofilenode_test.go b/cpuprofilenode_test.go
new file mode 100755
index 0000000..b9b4e89
--- /dev/null
+++ b/cpuprofilenode_test.go
@@ -0,0 +1,112 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go_test
+
+import (
+ "testing"
+
+ v8 "github.com/nzhenev/v8go/v8go"
+)
+
+func TestCPUProfileNode(t *testing.T) {
+ t.Parallel()
+
+ ctx := v8.NewContext(nil)
+ iso := ctx.Isolate()
+ defer iso.Dispose()
+ defer ctx.Close()
+
+ cpuProfiler := v8.NewCPUProfiler(iso)
+ defer cpuProfiler.Dispose()
+
+ title := "cpuprofilenodetest"
+ cpuProfiler.StartProfiling(title)
+
+ _, err := ctx.RunScript(profileScript, "script.js")
+ fatalIf(t, err)
+ val, err := ctx.Global().Get("start")
+ fatalIf(t, err)
+ fn, err := val.AsFunction()
+ fatalIf(t, err)
+ timeout, err := v8.NewValue(iso, int32(1000))
+ fatalIf(t, err)
+ _, err = fn.Call(ctx.Global(), timeout)
+ fatalIf(t, err)
+
+ cpuProfile := cpuProfiler.StopProfiling(title)
+ if cpuProfile == nil {
+ t.Fatal("expected profile not to be nil")
+ }
+ defer cpuProfile.Delete()
+
+ rootNode := cpuProfile.GetTopDownRoot()
+ if rootNode == nil {
+ t.Fatal("expected top down root not to be nil")
+ }
+ count := rootNode.GetChildrenCount()
+ var startNode *v8.CPUProfileNode
+ for i := 0; i < count; i++ {
+ if rootNode.GetChild(i).GetFunctionName() == "start" {
+ startNode = rootNode.GetChild(i)
+ }
+ }
+ if startNode == nil {
+ t.Fatal("expected node not to be nil")
+ }
+ checkNode(t, startNode, "script.js", "start", 23, 15)
+
+ parentName := startNode.GetParent().GetFunctionName()
+ if parentName != "(root)" {
+ t.Fatalf("expected (root), but got %v", parentName)
+ }
+
+ fooNode := findChild(t, startNode, "foo")
+ checkNode(t, fooNode, "script.js", "foo", 15, 13)
+
+ delayNode := findChild(t, fooNode, "delay")
+ checkNode(t, delayNode, "script.js", "delay", 12, 15)
+
+ barNode := findChild(t, fooNode, "bar")
+ checkNode(t, barNode, "script.js", "bar", 13, 13)
+
+ loopNode := findChild(t, delayNode, "loop")
+ checkNode(t, loopNode, "script.js", "loop", 1, 14)
+
+ bazNode := findChild(t, fooNode, "baz")
+ checkNode(t, bazNode, "script.js", "baz", 14, 13)
+}
+
+func findChild(t *testing.T, node *v8.CPUProfileNode, functionName string) *v8.CPUProfileNode {
+ t.Helper()
+
+ var child *v8.CPUProfileNode
+ count := node.GetChildrenCount()
+ for i := 0; i < count; i++ {
+ if node.GetChild(i).GetFunctionName() == functionName {
+ child = node.GetChild(i)
+ }
+ }
+ if child == nil {
+ t.Fatal("failed to find child node")
+ }
+ return child
+}
+
+func checkNode(t *testing.T, node *v8.CPUProfileNode, scriptResourceName string, functionName string, line, column int) {
+ t.Helper()
+
+ if node.GetFunctionName() != functionName {
+ t.Fatalf("expected node to have function name %s, but got %s", functionName, node.GetFunctionName())
+ }
+ if node.GetScriptResourceName() != scriptResourceName {
+ t.Fatalf("expected node to have script resource name %s, but got %s", scriptResourceName, node.GetScriptResourceName())
+ }
+ if node.GetLineNumber() != line {
+ t.Fatalf("expected node at line %d, but got %d", line, node.GetLineNumber())
+ }
+ if node.GetColumnNumber() != column {
+ t.Fatalf("expected node at column %d, but got %d", column, node.GetColumnNumber())
+ }
+}
diff --git a/cpuprofiler.go b/cpuprofiler.go
new file mode 100755
index 0000000..bbe7a17
--- /dev/null
+++ b/cpuprofiler.go
@@ -0,0 +1,98 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go
+
+/*
+#include <stdlib.h>
+#include "v8go.h"
+*/
+import "C"
+
+import (
+ "time"
+ "unsafe"
+)
+
+type CPUProfiler struct {
+ p *C.CPUProfiler
+ iso *Isolate
+}
+
+// CPUProfiler is used to control CPU profiling.
+func NewCPUProfiler(iso *Isolate) *CPUProfiler {
+ profiler := C.NewCPUProfiler(iso.ptr)
+ return &CPUProfiler{
+ p: profiler,
+ iso: iso,
+ }
+}
+
+// Dispose will dispose the profiler.
+func (c *CPUProfiler) Dispose() {
+ if c.p == nil {
+ return
+ }
+
+ C.CPUProfilerDispose(c.p)
+ c.p = nil
+}
+
+// StartProfiling starts collecting a CPU profile. Title may be an empty string. Several
+// profiles may be collected at once. Attempts to start collecting several
+// profiles with the same title are silently ignored.
+func (c *CPUProfiler) StartProfiling(title string) {
+ if c.p == nil || c.iso.ptr == nil {
+ panic("profiler or isolate are nil")
+ }
+
+ tstr := C.CString(title)
+ defer C.free(unsafe.Pointer(tstr))
+
+ C.CPUProfilerStartProfiling(c.p, tstr)
+}
+
+// Stops collecting CPU profile with a given title and returns it.
+// If the title given is empty, finishes the last profile started.
+func (c *CPUProfiler) StopProfiling(title string) *CPUProfile {
+ if c.p == nil || c.iso.ptr == nil {
+ panic("profiler or isolate are nil")
+ }
+
+ tstr := C.CString(title)
+ defer C.free(unsafe.Pointer(tstr))
+
+ profile := C.CPUProfilerStopProfiling(c.p, tstr)
+
+ return &CPUProfile{
+ p: profile,
+ title: C.GoString(profile.title),
+ root: newCPUProfileNode(profile.root, nil),
+ startTimeOffset: time.Duration(profile.startTime) * time.Millisecond,
+ endTimeOffset: time.Duration(profile.endTime) * time.Millisecond,
+ }
+}
+
+func newCPUProfileNode(node *C.CPUProfileNode, parent *CPUProfileNode) *CPUProfileNode {
+ n := &CPUProfileNode{
+ nodeID: int(node.nodeId),
+ scriptID: int(node.scriptId),
+ scriptResourceName: C.GoString(node.scriptResourceName),
+ functionName: C.GoString(node.functionName),
+ lineNumber: int(node.lineNumber),
+ columnNumber: int(node.columnNumber),
+ hitCount: int(node.hitCount),
+ bailoutReason: C.GoString(node.bailoutReason),
+ parent: parent,
+ }
+
+ if node.childrenCount > 0 {
+ n.children = make([]*CPUProfileNode, node.childrenCount)
+ for i, child := range (*[1 << 28]*C.CPUProfileNode)(unsafe.Pointer(node.children))[:node.childrenCount:node.childrenCount] {
+ n.children[i] = newCPUProfileNode(child, n)
+ }
+ }
+
+ return n
+}
diff --git a/cpuprofiler_test.go b/cpuprofiler_test.go
new file mode 100755
index 0000000..6b5eebb
--- /dev/null
+++ b/cpuprofiler_test.go
@@ -0,0 +1,109 @@
+// Copyright 2021 the v8go contributors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package v8go_test
+
+import (
+ "testing"
+
+ v8 "github.com/nzhenev/v8go/v8go"
+)
+
+func TestCPUProfiler_Dispose(t *testing.T) {
+ t.Parallel()
+
+ iso := v8.NewIsolate()
+ defer iso.Dispose()
+ cpuProfiler := v8.NewCPUProfiler(iso)
+
+ cpuProfiler.Dispose()
+ // noop when called multiple times
+ cpuProfiler.Dispose()
+
+ // verify panics when profiler disposed
+ if recoverPanic(func() { cpuProfiler.StartProfiling("") }) == nil {
+ t.Error("expected panic")
+ }
+
+ if recoverPanic(func() { cpuProfiler.StopProfiling("") }) == nil {
+ t.Error("expected panic")
+ }
+
+ cpuProfiler = v8.NewCPUProfiler(iso)
+ defer cpuProfiler.Dispose()
+ iso.Dispose()
+
+ // verify panics when isolate disposed
+ if recoverPanic(func() { cpuProfiler.StartProfiling("") }) == nil {
+ t.Error("expected panic")
+ }
+
+ if recoverPanic(func() { cpuProfiler.StopProfiling("") }) == nil {
+ t.Error("expected panic")
+ }
+}
+
+func TestCPUProfiler(t *testing.T) {
+ t.Parallel()
+
+ ctx := v8.NewContext(nil)
+ iso := ctx.Isolate()
+ defer iso.Dispose()
+ defer ctx.Close()
+
+ cpuProfiler := v8.NewCPUProfiler(iso)
+ defer cpuProfiler.Dispose()
+
+ title := "cpuprofilertest"
+ cpuProfiler.StartProfiling(title)
+
+ _, err := ctx.RunScript(profileScript, "script.js")
+ fatalIf(t, err)
+ val, err := ctx.Global().Get("start")
+ fatalIf(t, err)
+ fn, err := val.AsFunction()
+ fatalIf(t, err)
+ timeout, err := v8.NewValue(iso, int32(0))
+ fatalIf(t, err)
+ _, err = fn.Call(ctx.Global(), timeout)
+ fatalIf(t, err)
+
+ cpuProfile := cpuProfiler.StopProfiling(title)
+ defer cpuProfile.Delete()
+
+ if cpuProfile.GetTitle() != title {
+ t.Errorf("expected %s, but got %s", title, cpuProfile.GetTitle())
+ }
+}
+
+const profileScript = `function loop(timeout) {
+ this.mmm = 0;
+ var start = Date.now();
+ while (Date.now() - start < timeout) {
+ var n = 10;
+ while(n > 1) {
+ n--;
+ this.mmm += n * n * n;
+ }
+ }
+}
+function delay() { try { loop(10); } catch(e) { } }
+function bar() { delay(); }
+function baz() { delay(); }
+function foo() {
+ try {
+ delay();
+ bar();
+ delay();
+ baz();
+ } catch (e) { }
+}
+function start(timeout) {
+ var start = Date.now();
+ do {
+ foo();
+ var duration = Date.now() - start;
+ } while (duration < timeout);
+ return duration;
+};`
diff --git a/deps/.gclient b/deps/.gclient
new file mode 100755
index 0000000..a8725fe
--- /dev/null
+++ b/deps/.gclient
@@ -0,0 +1,9 @@
+solutions = [
+ {
+ "name": "v8",
+ "url": "https://chromium.googlesource.com/v8/v8.git",
+ "deps_file": "DEPS",
+ "managed": False,
+ "custom_deps": {},
+ },
+]
diff --git a/deps/build.py b/deps/build.py
new file mode 100755
index 0000000..805585b
--- /dev/null
+++ b/deps/build.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+import platform
+import os
+import subprocess
+import shutil
+import argparse
+
+valid_archs = ['arm64', 'x86_64']
+# "x86_64" is called "amd64" on Windows
+current_arch = platform.uname()[4].lower().replace("amd64", "x86_64")
+default_arch = current_arch if current_arch in valid_archs else None
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--debug', dest='debug', action='store_true')
+parser.add_argument('--no-clang', dest='clang', action='store_false')
+parser.add_argument('--arch',
+ dest='arch',
+ action='store',
+ choices=valid_archs,
+ default=default_arch,
+ required=default_arch is None)
+parser.set_defaults(debug=False, clang=True)
+args = parser.parse_args()
+
+deps_path = os.path.dirname(os.path.realpath(__file__))
+v8_path = os.path.join(deps_path, "v8")
+tools_path = os.path.join(deps_path, "depot_tools")
+is_windows = platform.system().lower() == "windows"
+
+gclient_sln = [
+ { "name" : "v8",
+ "url" : "https://chromium.googlesource.com/v8/v8.git",
+ "deps_file" : "DEPS",
+ "managed" : False,
+ "custom_deps" : {
+ # These deps are unnecessary for building.
+ "v8/testing/gmock" : None,
+ "v8/test/wasm-js" : None,
+ "v8/third_party/android_tools" : None,
+ "v8/third_party/catapult" : None,
+ "v8/third_party/colorama/src" : None,
+ "v8/tools/gyp" : None,
+ "v8/tools/luci-go" : None,
+ },
+ "custom_vars": {
+ "build_for_node" : True,
+ },
+ },
+]
+
+gn_args = """
+is_debug=%s
+is_clang=%s
+target_cpu="%s"
+v8_target_cpu="%s"
+clang_use_chrome_plugins=false
+use_custom_libcxx=false
+use_sysroot=false
+symbol_level=%s
+strip_debug_info=%s
+is_component_build=false
+v8_monolithic=true
+v8_use_external_startup_data=false
+treat_warnings_as_errors=false
+v8_embedder_string="-v8go"
+v8_enable_gdbjit=false
+v8_enable_i18n_support=true
+icu_use_data_file=false
+v8_enable_test_features=false
+exclude_unwind_tables=true
+"""
+
+def v8deps():
+ spec = "solutions = %s" % gclient_sln
+ env = os.environ.copy()
+ env["PATH"] = tools_path + os.pathsep + env["PATH"]
+ subprocess.check_call(cmd(["gclient", "sync", "--spec", spec]),
+ cwd=deps_path,
+ env=env)
+
+def cmd(args):
+ return ["cmd", "/c"] + args if is_windows else args
+
+def os_arch():
+ u = platform.uname()
+ return u[0].lower() + "_" + args.arch
+
+def v8_arch():
+ if args.arch == "x86_64":
+ return "x64"
+ return args.arch
+
+def apply_mingw_patches():
+ v8_build_path = os.path.join(v8_path, "build")
+ apply_patch("0000-add-mingw-main-code-changes", v8_path)
+ apply_patch("0001-add-mingw-toolchain", v8_build_path)
+ update_last_change()
+ zlib_path = os.path.join(v8_path, "third_party", "zlib")
+ zlib_src_gn = os.path.join(deps_path, os_arch(), "zlib.gn")
+ zlib_dst_gn = os.path.join(zlib_path, "BUILD.gn")
+ shutil.copy(zlib_src_gn, zlib_dst_gn)
+
+def apply_patch(patch_name, working_dir):
+ patch_path = os.path.join(deps_path, os_arch(), patch_name + ".patch")
+ subprocess.check_call(["git", "apply", "-v", patch_path], cwd=working_dir)
+
+def update_last_change():
+ out_path = os.path.join(v8_path, "build", "util", "LASTCHANGE")
+ subprocess.check_call(["python", "build/util/lastchange.py", "-o", out_path], cwd=v8_path)
+
+def main():
+ v8deps()
+ if is_windows:
+ apply_mingw_patches()
+
+ gn_path = os.path.join(tools_path, "gn")
+ assert(os.path.exists(gn_path))
+ ninja_path = os.path.join(tools_path, "ninja" + (".exe" if is_windows else ""))
+ assert(os.path.exists(ninja_path))
+
+ build_path = os.path.join(deps_path, ".build", os_arch())
+ env = os.environ.copy()
+
+ is_debug = 'true' if args.debug else 'false'
+ is_clang = 'true' if args.clang else 'false'
+ # symbol_level = 1 includes line number information
+ # symbol_level = 2 can be used for additional debug information, but it can increase the
+ # compiled library by an order of magnitude and further slow down compilation
+ symbol_level = 1 if args.debug else 0
+ strip_debug_info = 'false' if args.debug else 'true'
+
+ arch = v8_arch()
+ gnargs = gn_args % (is_debug, is_clang, arch, arch, symbol_level, strip_debug_info)
+ gen_args = gnargs.replace('\n', ' ')
+
+ subprocess.check_call(cmd([gn_path, "gen", build_path, "--args=" + gen_args]),
+ cwd=v8_path,
+ env=env)
+ subprocess.check_call([ninja_path, "-v", "-C", build_path, "v8_monolith"],
+ cwd=v8_path,
+ env=env)
+
+ lib_fn = os.path.join(build_path, "obj/libv8_monolith.a")
+ dest_path = os.path.join(deps_path, os_arch())
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ dest_fn = os.path.join(dest_path, 'libv8.a')
+ shutil.copy(lib_fn, dest_fn)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/darwin_arm64/libv8.a b/deps/darwin_arm64/libv8.a
new file mode 100755
index 0000000..2b76165
Binary files /dev/null and b/deps/darwin_arm64/libv8.a differ
diff --git a/deps/darwin_arm64/vendor.go b/deps/darwin_arm64/vendor.go
new file mode 100755
index 0000000..0d899f5
--- /dev/null
+++ b/deps/darwin_arm64/vendor.go
@@ -0,0 +1,3 @@
+// Package darwin_arm64 is required to provide support for vendoring modules
+// DO NOT REMOVE
+package darwin_arm64
diff --git a/deps/darwin_x86_64/libv8.a b/deps/darwin_x86_64/libv8.a
new file mode 100755
index 0000000..3a75d22
Binary files /dev/null and b/deps/darwin_x86_64/libv8.a differ
diff --git a/deps/darwin_x86_64/vendor.go b/deps/darwin_x86_64/vendor.go
new file mode 100755
index 0000000..203e5f1
--- /dev/null
+++ b/deps/darwin_x86_64/vendor.go
@@ -0,0 +1,3 @@
+// Package darwin_x86_64 is required to provide support for vendoring modules
+// DO NOT REMOVE
+package darwin_x86_64
diff --git a/deps/include/APIDesign.md b/deps/include/APIDesign.md
new file mode 100755
index 0000000..fe42c8e
--- /dev/null
+++ b/deps/include/APIDesign.md
@@ -0,0 +1,72 @@
+# The V8 public C++ API
+
+# Overview
+
+The V8 public C++ API aims to support four use cases:
+
+1. Enable applications that embed V8 (called the embedder) to configure and run
+ one or more instances of V8.
+2. Expose ECMAScript-like capabilities to the embedder.
+3. Enable the embedder to interact with ECMAScript by exposing API objects.
+4. Provide access to the V8 debugger (inspector).
+
+# Configuring and running an instance of V8
+
+V8 requires access to certain OS-level primitives such as the ability to
+schedule work on threads, or allocate memory.
+
+The embedder can define how to access those primitives via the v8::Platform
+interface. While V8 bundles a basic implementation, embedders are highly
+encouraged to implement v8::Platform themselves.
+
+Currently, the v8::ArrayBuffer::Allocator is passed to the v8::Isolate factory
+method, however, conceptually it should also be part of the v8::Platform since
+all instances of V8 should share one allocator.
+
+Once the v8::Platform is configured, an v8::Isolate can be created. All
+further interactions with V8 should explicitly reference the v8::Isolate they
+refer to. All API methods should eventually take an v8::Isolate parameter.
+
+When a given instance of V8 is no longer needed, it can be destroyed by
+disposing the respective v8::Isolate. If the embedder wishes to free all memory
+associated with the v8::Isolate, it has to first clear all global handles
+associated with that v8::Isolate.
+
+# ECMAScript-like capabilities
+
+In general, the C++ API shouldn't enable capabilities that aren't available to
+scripts running in V8. Experience has shown that it's not possible to maintain
+such API methods in the long term. However, capabilities also available to
+scripts, i.e., ones that are defined in the ECMAScript standard are there to
+stay, and we can safely expose them to embedders.
+
+The C++ API should also be pleasant to use, and not require learning new
+paradigms. Similarly to how the API exposed to scripts aims to provide good
+ergonomics, we should aim to provide a reasonable developer experience for this
+API surface.
+
+ECMAScript makes heavy use of exceptions, however, V8's C++ code doesn't use
+C++ exceptions. Therefore, all API methods that can throw exceptions should
+indicate so by returning a v8::Maybe<> or v8::MaybeLocal<> result,
+and by taking a v8::Local<v8::Context> parameter that indicates in which
+context a possible exception should be thrown.
+
+# API objects
+
+V8 allows embedders to define special objects that expose additional
+capabilities and APIs to scripts. The most prominent example is exposing the
+HTML DOM in Blink. Other examples are e.g. node.js. It is less clear what kind
+of capabilities we want to expose via this API surface. As a rule of thumb, we
+want to expose operations as defined in the WebIDL and HTML spec: we
+assume that those requirements are somewhat stable, and that they are a
+superset of the requirements of other embedders including node.js.
+
+Ideally, the API surfaces defined in those specs hook into the ECMAScript spec
+which in turn guarantees long-term stability of the API.
+
+# The V8 inspector
+
+All debugging capabilities of V8 should be exposed via the inspector protocol.
+The exception to this are profiling features exposed via v8-profiler.h.
+Changes to the inspector protocol need to ensure backwards compatibility and
+commitment to maintain.
diff --git a/deps/include/DEPS b/deps/include/DEPS
new file mode 100755
index 0000000..21ce3d9
--- /dev/null
+++ b/deps/include/DEPS
@@ -0,0 +1,10 @@
+include_rules = [
+ # v8-inspector-protocol.h depends on generated files under include/inspector.
+ "+inspector",
+ "+cppgc/common.h",
+ # Used by v8-cppgc.h to bridge to cppgc.
+ "+cppgc/custom-space.h",
+ "+cppgc/heap-statistics.h",
+ "+cppgc/internal/write-barrier.h",
+ "+cppgc/visitor.h",
+]
diff --git a/deps/include/DIR_METADATA b/deps/include/DIR_METADATA
new file mode 100755
index 0000000..a27ea1b
--- /dev/null
+++ b/deps/include/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+}
\ No newline at end of file
diff --git a/deps/include/OWNERS b/deps/include/OWNERS
new file mode 100755
index 0000000..535040c
--- /dev/null
+++ b/deps/include/OWNERS
@@ -0,0 +1,23 @@
+adamk@chromium.org
+cbruni@chromium.org
+leszeks@chromium.org
+mlippautz@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
+
+per-file *DEPS=file:../COMMON_OWNERS
+per-file v8-internal.h=file:../COMMON_OWNERS
+
+per-file v8-debug.h=file:../src/debug/OWNERS
+
+per-file js_protocol.pdl=file:../src/inspector/OWNERS
+per-file v8-inspector*=file:../src/inspector/OWNERS
+per-file v8-inspector*=file:../src/inspector/OWNERS
+
+# Needed by the auto_tag builder
+per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+
+# For branch updates:
+per-file v8-version.h=file:../INFRA_OWNERS
+per-file v8-version.h=hablich@chromium.org
+per-file v8-version.h=vahl@chromium.org
diff --git a/deps/include/cppgc/DEPS b/deps/include/cppgc/DEPS
new file mode 100755
index 0000000..861d118
--- /dev/null
+++ b/deps/include/cppgc/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+ "-include",
+ "+v8config.h",
+ "+v8-platform.h",
+ "+cppgc",
+ "-src",
+ "+libplatform/libplatform.h",
+]
diff --git a/deps/include/cppgc/OWNERS b/deps/include/cppgc/OWNERS
new file mode 100755
index 0000000..6ccabf6
--- /dev/null
+++ b/deps/include/cppgc/OWNERS
@@ -0,0 +1,2 @@
+bikineev@chromium.org
+omerkatz@chromium.org
\ No newline at end of file
diff --git a/deps/include/cppgc/README.md b/deps/include/cppgc/README.md
new file mode 100755
index 0000000..d825ea5
--- /dev/null
+++ b/deps/include/cppgc/README.md
@@ -0,0 +1,135 @@
+# Oilpan: C++ Garbage Collection
+
+Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector.
+Oilpan implements mark-and-sweep garbage collection (GC) with limited compaction (for a subset of objects).
+
+**Key properties**
+
+- Trace-based garbage collection;
+- Incremental and concurrent marking;
+- Incremental and concurrent sweeping;
+- Precise on-heap memory layout;
+- Conservative on-stack memory layout;
+- Allows for collection with and without considering stack;
+- Non-incremental and non-concurrent compaction for selected spaces;
+
+See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example on how to get started using Oilpan to manage C++ code.
+
+Oilpan follows V8's project organization, see e.g. on how we accept [contributions](https://v8.dev/docs/contribute) and [provide a stable API](https://v8.dev/docs/api).
+
+## Threading model
+
+Oilpan features thread-local garbage collection and assumes heaps are not shared among threads.
+In other words, objects are accessed and ultimately reclaimed by the garbage collector on the same thread that allocates them.
+This allows Oilpan to run garbage collection in parallel with mutators running in other threads.
+
+References to objects belonging to another thread's heap are modeled using cross-thread roots.
+This is even true for on-heap to on-heap references.
+
+Oilpan heaps may generally not be accessed from different threads unless otherwise noted.
+
+## Heap partitioning
+
+Oilpan's heaps are partitioned into spaces.
+The space for an object is chosen depending on a number of criteria, e.g.:
+
+- Objects over 64KiB are allocated in a large object space
+- Objects can be assigned to a dedicated custom space.
+ Custom spaces can also be marked as compactable.
+- Other objects are allocated in one of the normal page spaces bucketed depending on their size.
+
+## Precise and conservative garbage collection
+
+Oilpan supports two kinds of GCs:
+
+1. **Conservative GC.**
+A GC is called conservative when it is executed while the regular native stack is not empty.
+In this case, the native stack might contain references to objects in Oilpan's heap, which should be kept alive.
+The GC scans the native stack and treats the pointers discovered via the native stack as part of the root set.
+This kind of GC is considered imprecise because values on stack other than references may accidentally appear as references to on-heap objects, which means these objects will be kept alive despite being in practice unreachable from the application as an actual reference.
+
+2. **Precise GC.**
+A precise GC is triggered at the end of an event loop, which is controlled by an embedder via a platform.
+At this point, it is guaranteed that there are no on-stack references pointing to Oilpan's heap.
+This means there is no risk of confusing other value types with references.
+Oilpan has precise knowledge of on-heap object layouts, and so it knows exactly where pointers lie in memory.
+Oilpan can just start marking from the regular root set and collect all garbage precisely.
+
+## Atomic, incremental and concurrent garbage collection
+
+Oilpan has three modes of operation:
+
+1. **Atomic GC.**
+The entire GC cycle, including all its phases (e.g. see [Marking](#Marking-phase) and [Sweeping](#Sweeping-phase)), are executed back to back in a single pause.
+This mode of operation is also known as Stop-The-World (STW) garbage collection.
+It results in the most jank (due to a single long pause), but is overall the most efficient (e.g. no need for write barriers).
+
+2. **Incremental GC.**
+Garbage collection work is split up into multiple steps which are interleaved with the mutator, i.e. user code chunked into tasks.
+Each step is a small chunk of work that is executed either as dedicated tasks between mutator tasks or, as needed, during mutator tasks.
+Using incremental GC introduces the need for write barriers that record changes to the object graph so that a consistent state is observed and no objects are accidentally considered dead and reclaimed.
+The incremental steps are followed by a smaller atomic pause to finalize garbage collection.
+The smaller pause times, due to smaller chunks of work, helps with reducing jank.
+
+3. **Concurrent GC.**
+This is the most common type of GC.
+It builds on top of incremental GC and offloads much of the garbage collection work away from the mutator thread and on to background threads.
+Using concurrent GC allows the mutator thread to spend less time on GC and more on the actual mutator.
+
+## Marking phase
+
+The marking phase consists of the following steps:
+
+1. Mark all objects in the root set.
+
+2. Mark all objects transitively reachable from the root set by calling `Trace()` methods defined on each object.
+
+3. Clear out all weak handles to unreachable objects and run weak callbacks.
+
+The marking phase can be executed atomically in a stop-the-world manner, in which all 3 steps are executed one after the other.
+
+Alternatively, it can also be executed incrementally/concurrently.
+With incremental/concurrent marking, step 1 is executed in a short pause after which the mutator regains control.
+Step 2 is repeatedly executed in an interleaved manner with the mutator.
+When the GC is ready to finalize, i.e. step 2 is (almost) finished, another short pause is triggered in which step 2 is finished and step 3 is performed.
+
+To prevent use-after-free (UAF) issues it is required for Oilpan to know about all edges in the object graph.
+This means that all pointers except on-stack pointers must be wrapped with Oilpan's handles (i.e., Persistent<>, Member<>, WeakMember<>).
+Raw pointers to on-heap objects create an edge that Oilpan cannot observe and cause UAF issues.
+Thus, raw pointers shall not be used to reference on-heap objects (except for raw pointers on native stacks).
+
+## Sweeping phase
+
+The sweeping phase consists of the following steps:
+
+1. Invoke pre-finalizers.
+At this point, no destructors have been invoked and no memory has been reclaimed.
+Pre-finalizers are allowed to access any other on-heap objects, even those that may get destructed.
+
+2. Sweeping invokes destructors of the dead (unreachable) objects and reclaims memory to be reused by future allocations.
+
+Assumptions should not be made about the order and the timing of their execution.
+There is no guarantee on the order in which the destructors are invoked.
+That's why destructors must not access any other on-heap objects (which might have already been destructed).
+If some destructor unavoidably needs to access other on-heap objects, it will have to be converted to a pre-finalizer.
+The pre-finalizer is allowed to access other on-heap objects.
+
+The mutator is resumed before all destructors have run.
+For example, imagine a case where X is a client of Y, and Y holds a list of clients.
+If the code relies on X's destructor removing X from the list, there is a risk that Y iterates the list and calls some method of X which may touch other on-heap objects.
+This causes a use-after-free.
+Care must be taken to make sure that X is explicitly removed from the list before the mutator resumes its execution in a way that doesn't rely on X's destructor (e.g. a pre-finalizer).
+
+Similar to marking, sweeping can be executed in either an atomic stop-the-world manner or incrementally/concurrently.
+With incremental/concurrent sweeping, step 2 is interleaved with the mutator.
+Incremental/concurrent sweeping can be atomically finalized in case it is needed to trigger another GC cycle.
+Even with concurrent sweeping, destructors are guaranteed to run on the thread the object has been allocated on to preserve C++ semantics.
+
+Notes:
+
+* Weak processing runs only when the holder object of the WeakMember outlives the pointed object.
+If the holder object and the pointed object die at the same time, weak processing doesn't run.
+It is wrong to write code assuming that the weak processing always runs.
+
+* Pre-finalizers are heavy because the thread needs to scan all pre-finalizers at each sweeping phase to determine which pre-finalizers should be invoked (the thread needs to invoke pre-finalizers of dead objects).
+Adding pre-finalizers to frequently created objects should be avoided.
diff --git a/deps/include/cppgc/allocation.h b/deps/include/cppgc/allocation.h
new file mode 100755
index 0000000..69883fb
--- /dev/null
+++ b/deps/include/cppgc/allocation.h
@@ -0,0 +1,310 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_ALLOCATION_H_
+#define INCLUDE_CPPGC_ALLOCATION_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "cppgc/custom-space.h"
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/gc-info.h"
+#include "cppgc/type-traits.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if defined(__has_attribute)
+#if __has_attribute(assume_aligned)
+#define CPPGC_DEFAULT_ALIGNED \
+ __attribute__((assume_aligned(api_constants::kDefaultAlignment)))
+#define CPPGC_DOUBLE_WORD_ALIGNED \
+ __attribute__((assume_aligned(2 * api_constants::kDefaultAlignment)))
+#endif // __has_attribute(assume_aligned)
+#endif // defined(__has_attribute)
+
+#if !defined(CPPGC_DEFAULT_ALIGNED)
+#define CPPGC_DEFAULT_ALIGNED
+#endif
+
+#if !defined(CPPGC_DOUBLE_WORD_ALIGNED)
+#define CPPGC_DOUBLE_WORD_ALIGNED
+#endif
+
+namespace cppgc {
+
+/**
+ * AllocationHandle is used to allocate garbage-collected objects.
+ */
+class AllocationHandle;
+
+namespace internal {
+
+// Similar to C++17 std::align_val_t.
+enum class AlignVal : size_t {};
+
+class V8_EXPORT MakeGarbageCollectedTraitInternal {
+ protected:
+ static inline void MarkObjectAsFullyConstructed(const void* payload) {
+ // See api_constants for an explanation of the constants.
+ std::atomic* atomic_mutable_bitfield =
+ reinterpret_cast*>(
+ const_cast(reinterpret_cast(
+ reinterpret_cast(payload) -
+ api_constants::kFullyConstructedBitFieldOffsetFromPayload)));
+ // It's safe to split use load+store here (instead of a read-modify-write
+ // operation), since it's guaranteed that this 16-bit bitfield is only
+ // modified by a single thread. This is cheaper in terms of code bloat (on
+ // ARM) and performance.
+ uint16_t value = atomic_mutable_bitfield->load(std::memory_order_relaxed);
+ value |= api_constants::kFullyConstructedBitMask;
+ atomic_mutable_bitfield->store(value, std::memory_order_release);
+ }
+
+ // Dispatch based on compile-time information.
+ //
+ // Default implementation is for a custom space with >`kDefaultAlignment` byte
+ // alignment.
+ template
+ struct AllocationDispatcher final {
+ static void* Invoke(AllocationHandle& handle, size_t size) {
+ static_assert(std::is_base_of::value,
+ "Custom space must inherit from CustomSpaceBase.");
+ static_assert(
+ !CustomSpace::kSupportsCompaction,
+ "Custom spaces that support compaction do not support allocating "
+ "objects with non-default (i.e. word-sized) alignment.");
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, static_cast(alignment),
+ internal::GCInfoTrait::Index(), CustomSpace::kSpaceIndex);
+ }
+ };
+
+ // Fast path for regular allocations for the default space with
+ // `kDefaultAlignment` byte alignment.
+ template
+ struct AllocationDispatcher
+ final {
+ static void* Invoke(AllocationHandle& handle, size_t size) {
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, internal::GCInfoTrait::Index());
+ }
+ };
+
+ // Default space with >`kDefaultAlignment` byte alignment.
+ template
+ struct AllocationDispatcher final {
+ static void* Invoke(AllocationHandle& handle, size_t size) {
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, static_cast(alignment),
+ internal::GCInfoTrait::Index());
+ }
+ };
+
+ // Custom space with `kDefaultAlignment` byte alignment.
+ template
+ struct AllocationDispatcher
+ final {
+ static void* Invoke(AllocationHandle& handle, size_t size) {
+ static_assert(std::is_base_of::value,
+ "Custom space must inherit from CustomSpaceBase.");
+ return MakeGarbageCollectedTraitInternal::Allocate(
+ handle, size, internal::GCInfoTrait::Index(),
+ CustomSpace::kSpaceIndex);
+ }
+ };
+
+ private:
+ static void* CPPGC_DEFAULT_ALIGNED Allocate(cppgc::AllocationHandle&, size_t,
+ GCInfoIndex);
+ static void* CPPGC_DOUBLE_WORD_ALIGNED Allocate(cppgc::AllocationHandle&,
+ size_t, AlignVal,
+ GCInfoIndex);
+ static void* CPPGC_DEFAULT_ALIGNED Allocate(cppgc::AllocationHandle&, size_t,
+ GCInfoIndex, CustomSpaceIndex);
+ static void* CPPGC_DOUBLE_WORD_ALIGNED Allocate(cppgc::AllocationHandle&,
+ size_t, AlignVal, GCInfoIndex,
+ CustomSpaceIndex);
+
+ friend class HeapObjectHeader;
+};
+
+} // namespace internal
+
+/**
+ * Base trait that provides utilities for advanced users that have custom
+ * allocation needs (e.g., overriding size). It's expected that users override
+ * MakeGarbageCollectedTrait (see below) and inherit from
+ * MakeGarbageCollectedTraitBase and make use of the low-level primitives
+ * offered to allocate and construct an object.
+ */
+template
+class MakeGarbageCollectedTraitBase
+ : private internal::MakeGarbageCollectedTraitInternal {
+ private:
+ static_assert(internal::IsGarbageCollectedType::value,
+ "T needs to be a garbage collected object");
+ static_assert(!IsGarbageCollectedWithMixinTypeV ||
+ sizeof(T) <=
+ internal::api_constants::kLargeObjectSizeThreshold,
+ "GarbageCollectedMixin may not be a large object");
+
+ protected:
+ /**
+ * Allocates memory for an object of type T.
+ *
+ * \param handle AllocationHandle identifying the heap to allocate the object
+ * on.
+ * \param size The size that should be reserved for the object.
+ * \returns the memory to construct an object of type T on.
+ */
+ V8_INLINE static void* Allocate(AllocationHandle& handle, size_t size) {
+ static_assert(
+ std::is_base_of::value,
+ "U of GarbageCollected must be a base of T. Check "
+ "GarbageCollected base class inheritance.");
+ static constexpr size_t kWantedAlignment =
+ alignof(T) < internal::api_constants::kDefaultAlignment
+ ? internal::api_constants::kDefaultAlignment
+ : alignof(T);
+ static_assert(
+ kWantedAlignment <= internal::api_constants::kMaxSupportedAlignment,
+ "Requested alignment larger than alignof(std::max_align_t) bytes. "
+ "Please file a bug to possibly get this restriction lifted.");
+ return AllocationDispatcher<
+ typename internal::GCInfoFolding<
+ T, typename T::ParentMostGarbageCollectedType>::ResultType,
+ typename SpaceTrait::Space, kWantedAlignment>::Invoke(handle, size);
+ }
+
+ /**
+ * Marks an object as fully constructed, resulting in precise handling by the
+ * garbage collector.
+ *
+ * \param payload The base pointer the object is allocated at.
+ */
+ V8_INLINE static void MarkObjectAsFullyConstructed(const void* payload) {
+ internal::MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
+ payload);
+ }
+};
+
+/**
+ * Passed to MakeGarbageCollected to specify how many bytes should be appended
+ * to the allocated object.
+ *
+ * Example:
+ * \code
+ * class InlinedArray final : public GarbageCollected {
+ * public:
+ * explicit InlinedArray(size_t bytes) : size(bytes), byte_array(this + 1) {}
+ * void Trace(Visitor*) const {}
+
+ * size_t size;
+ * char* byte_array;
+ * };
+ *
+ * auto* inlined_array = MakeGarbageCollectedbyte_array[i]);
+ * }
+ * \endcode
+ */
+struct AdditionalBytes {
+ constexpr explicit AdditionalBytes(size_t bytes) : value(bytes) {}
+ const size_t value;
+};
+
+/**
+ * Default trait class that specifies how to construct an object of type T.
+ * Advanced users may override how an object is constructed using the utilities
+ * that are provided through MakeGarbageCollectedTraitBase.
+ *
+ * Any trait overriding construction must
+ * - allocate through `MakeGarbageCollectedTraitBase::Allocate`;
+ * - mark the object as fully constructed using
+ * `MakeGarbageCollectedTraitBase::MarkObjectAsFullyConstructed`;
+ */
+template
+class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase {
+ public:
+ template
+ static T* Call(AllocationHandle& handle, Args&&... args) {
+ void* memory =
+ MakeGarbageCollectedTraitBase::Allocate(handle, sizeof(T));
+ T* object = ::new (memory) T(std::forward(args)...);
+ MakeGarbageCollectedTraitBase::MarkObjectAsFullyConstructed(object);
+ return object;
+ }
+
+ template
+ static T* Call(AllocationHandle& handle, AdditionalBytes additional_bytes,
+ Args&&... args) {
+ void* memory = MakeGarbageCollectedTraitBase::Allocate(
+ handle, sizeof(T) + additional_bytes.value);
+ T* object = ::new (memory) T(std::forward(args)...);
+ MakeGarbageCollectedTraitBase::MarkObjectAsFullyConstructed(object);
+ return object;
+ }
+};
+
+/**
+ * Allows users to specify a post-construction callback for specific types. The
+ * callback is invoked on the instance of type T right after it has been
+ * constructed. This can be useful when the callback requires a
+ * fully-constructed object to be able to dispatch to virtual methods.
+ */
+template
+struct PostConstructionCallbackTrait {
+ static void Call(T*) {}
+};
+
+/**
+ * Constructs a managed object of type T where T transitively inherits from
+ * GarbageCollected.
+ *
+ * \param args List of arguments with which an instance of T will be
+ * constructed.
+ * \returns an instance of type T.
+ */
+template
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
+ T* object =
+ MakeGarbageCollectedTrait::Call(handle, std::forward(args)...);
+ PostConstructionCallbackTrait::Call(object);
+ return object;
+}
+
+/**
+ * Constructs a managed object of type T where T transitively inherits from
+ * GarbageCollected. Created objects will have additional bytes appended to
+ * it. Allocated memory would suffice for `sizeof(T) + additional_bytes`.
+ *
+ * \param additional_bytes Denotes how many bytes to append to T.
+ * \param args List of arguments with which an instance of T will be
+ * constructed.
+ * \returns an instance of type T.
+ */
+template
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle,
+ AdditionalBytes additional_bytes,
+ Args&&... args) {
+ T* object = MakeGarbageCollectedTrait::Call(handle, additional_bytes,
+ std::forward(args)...);
+ PostConstructionCallbackTrait::Call(object);
+ return object;
+}
+
+} // namespace cppgc
+
+#undef CPPGC_DEFAULT_ALIGNED
+#undef CPPGC_DOUBLE_WORD_ALIGNED
+
+#endif // INCLUDE_CPPGC_ALLOCATION_H_
diff --git a/deps/include/cppgc/common.h b/deps/include/cppgc/common.h
new file mode 100755
index 0000000..9610383
--- /dev/null
+++ b/deps/include/cppgc/common.h
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_COMMON_H_
+#define INCLUDE_CPPGC_COMMON_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+/**
+ * Indicator for the stack state of the embedder.
+ */
+enum class EmbedderStackState {
+ /**
+ * Stack may contain interesting heap pointers.
+ */
+ kMayContainHeapPointers,
+ /**
+ * Stack does not contain any interesting heap pointers.
+ */
+ kNoHeapPointers,
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_COMMON_H_
diff --git a/deps/include/cppgc/cross-thread-persistent.h b/deps/include/cppgc/cross-thread-persistent.h
new file mode 100755
index 0000000..1fa28af
--- /dev/null
+++ b/deps/include/cppgc/cross-thread-persistent.h
@@ -0,0 +1,464 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
+#define INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
+
+#include
+
+#include "cppgc/internal/persistent-node.h"
+#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/persistent.h"
+#include "cppgc/visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+// Wrapper around PersistentBase that allows accessing poisoned memory when
+// using ASAN. This is needed as the GC of the heap that owns the value
+// of a CTP, may clear it (heap termination, weakness) while the object
+// holding the CTP may be poisoned as itself may be deemed dead.
+class CrossThreadPersistentBase : public PersistentBase {
+ public:
+ CrossThreadPersistentBase() = default;
+ explicit CrossThreadPersistentBase(const void* raw) : PersistentBase(raw) {}
+
+ V8_CLANG_NO_SANITIZE("address") const void* GetValueFromGC() const {
+ return raw_;
+ }
+
+ V8_CLANG_NO_SANITIZE("address")
+ PersistentNode* GetNodeFromGC() const { return node_; }
+
+ V8_CLANG_NO_SANITIZE("address")
+ void ClearFromGC() const {
+ raw_ = nullptr;
+ SetNodeSafe(nullptr);
+ }
+
+ // GetNodeSafe() can be used for a thread-safe IsValid() check in a
+ // double-checked locking pattern. See ~BasicCrossThreadPersistent.
+ PersistentNode* GetNodeSafe() const {
+ return reinterpret_cast*>(&node_)->load(
+ std::memory_order_acquire);
+ }
+
+ // The GC writes using SetNodeSafe() while holding the lock.
+ V8_CLANG_NO_SANITIZE("address")
+ void SetNodeSafe(PersistentNode* value) const {
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_IS_ASAN 1
+#endif
+#endif
+
+#ifdef V8_IS_ASAN
+ __atomic_store(&node_, &value, __ATOMIC_RELEASE);
+#else // !V8_IS_ASAN
+ // Non-ASAN builds can use atomics. This also covers MSVC which does not
+ // have the __atomic_store intrinsic.
+ reinterpret_cast*>(&node_)->store(
+ value, std::memory_order_release);
+#endif // !V8_IS_ASAN
+
+#undef V8_IS_ASAN
+ }
+};
+
+template
+class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
+ public LocationPolicy,
+ private WeaknessPolicy,
+ private CheckingPolicy {
+ public:
+ using typename WeaknessPolicy::IsStrongPersistent;
+ using PointeeType = T;
+
+ ~BasicCrossThreadPersistent() {
+ // This implements fast path for destroying empty/sentinel.
+ //
+ // Simplified version of `AssignUnsafe()` to allow calling without a
+ // complete type `T`. Uses double-checked locking with a simple thread-safe
+ // check for a valid handle based on a node.
+ if (GetNodeSafe()) {
+ PersistentRegionLock guard;
+ const void* old_value = GetValue();
+ // The fast path check (GetNodeSafe()) does not acquire the lock. Recheck
+ // validity while holding the lock to ensure the reference has not been
+ // cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
+ }
+ }
+ // No need to call SetValue() as the handle is not used anymore. This can
+ // leave behind stale sentinel values but will always destroy the underlying
+ // node.
+ }
+
+ BasicCrossThreadPersistent(
+ const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent(
+ std::nullptr_t, const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent(
+ SentinelPointer s, const SourceLocation& loc = SourceLocation::Current())
+ : CrossThreadPersistentBase(s), LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent(
+ T* raw, const SourceLocation& loc = SourceLocation::Current())
+ : CrossThreadPersistentBase(raw), LocationPolicy(loc) {
+ if (!IsValid(raw)) return;
+ PersistentRegionLock guard;
+ CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
+ SetNode(region.AllocateNode(this, &TraceAsRoot));
+ this->CheckPointer(raw);
+ }
+
+ class UnsafeCtorTag {
+ private:
+ UnsafeCtorTag() = default;
+ template
+ friend class BasicCrossThreadPersistent;
+ };
+
+ BasicCrossThreadPersistent(
+ UnsafeCtorTag, T* raw,
+ const SourceLocation& loc = SourceLocation::Current())
+ : CrossThreadPersistentBase(raw), LocationPolicy(loc) {
+ if (!IsValid(raw)) return;
+ CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
+ SetNode(region.AllocateNode(this, &TraceAsRoot));
+ this->CheckPointer(raw);
+ }
+
+ BasicCrossThreadPersistent(
+ T& raw, const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(&raw, loc) {}
+
+ template ::value>>
+ BasicCrossThreadPersistent(
+ internal::BasicMember
+ member,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(member.Get(), loc) {}
+
+ BasicCrossThreadPersistent(
+ const BasicCrossThreadPersistent& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(loc) {
+ // Invoke operator=.
+ *this = other;
+ }
+
+ // Heterogeneous ctor.
+ template ::value>>
+ BasicCrossThreadPersistent(
+ const BasicCrossThreadPersistent& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(loc) {
+ *this = other;
+ }
+
+ BasicCrossThreadPersistent(
+ BasicCrossThreadPersistent&& other,
+ const SourceLocation& loc = SourceLocation::Current()) noexcept {
+ // Invoke operator=.
+ *this = std::move(other);
+ }
+
+ BasicCrossThreadPersistent& operator=(
+ const BasicCrossThreadPersistent& other) {
+ PersistentRegionLock guard;
+ AssignSafe(guard, other.Get());
+ return *this;
+ }
+
+ template ::value>>
+ BasicCrossThreadPersistent& operator=(
+ const BasicCrossThreadPersistent& other) {
+ PersistentRegionLock guard;
+ AssignSafe(guard, other.Get());
+ return *this;
+ }
+
+ BasicCrossThreadPersistent& operator=(BasicCrossThreadPersistent&& other) {
+ if (this == &other) return *this;
+ Clear();
+ PersistentRegionLock guard;
+ PersistentBase::operator=(std::move(other));
+ LocationPolicy::operator=(std::move(other));
+ if (!IsValid(GetValue())) return *this;
+ GetNode()->UpdateOwner(this);
+ other.SetValue(nullptr);
+ other.SetNode(nullptr);
+ this->CheckPointer(Get());
+ return *this;
+ }
+
+ /**
+ * Assigns a raw pointer.
+ *
+ * Note: **Not thread-safe.**
+ */
+ BasicCrossThreadPersistent& operator=(T* other) {
+ AssignUnsafe(other);
+ return *this;
+ }
+
+ // Assignment from member.
+ template ::value>>
+ BasicCrossThreadPersistent& operator=(
+ internal::BasicMember
+ member) {
+ return operator=(member.Get());
+ }
+
+ /**
+ * Assigns a nullptr.
+ *
+ * \returns the handle.
+ */
+ BasicCrossThreadPersistent& operator=(std::nullptr_t) {
+ Clear();
+ return *this;
+ }
+
+ /**
+ * Assigns the sentinel pointer.
+ *
+ * \returns the handle.
+ */
+ BasicCrossThreadPersistent& operator=(SentinelPointer s) {
+ PersistentRegionLock guard;
+ AssignSafe(guard, s);
+ return *this;
+ }
+
+ /**
+ * Returns a pointer to the stored object.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns a pointer to the stored object.
+ */
+ // CFI cast exemption to allow passing SentinelPointer through T* and support
+ // heterogeneous assignments between different Member and Persistent handles
+ // based on their actual types.
+ V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const {
+ return static_cast(const_cast(GetValue()));
+ }
+
+ /**
+ * Clears the stored object.
+ */
+ void Clear() {
+ PersistentRegionLock guard;
+ AssignSafe(guard, nullptr);
+ }
+
+ /**
+ * Returns a pointer to the stored object and releases it.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns a pointer to the stored object.
+ */
+ T* Release() {
+ T* result = Get();
+ Clear();
+ return result;
+ }
+
+ /**
+ * Conversion to boolean.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns true if an actual object has been stored and false otherwise.
+ */
+ explicit operator bool() const { return Get(); }
+
+ /**
+ * Conversion to object of type T.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns the object.
+ */
+ operator T*() const { return Get(); }
+
+ /**
+ * Dereferences the stored object.
+ *
+ * Note: **Not thread-safe.**
+ */
+ T* operator->() const { return Get(); }
+ T& operator*() const { return *Get(); }
+
+ template
+ BasicCrossThreadPersistent
+ To() const {
+ using OtherBasicCrossThreadPersistent =
+ BasicCrossThreadPersistent;
+ PersistentRegionLock guard;
+ return OtherBasicCrossThreadPersistent(
+ typename OtherBasicCrossThreadPersistent::UnsafeCtorTag(),
+ static_cast(Get()));
+ }
+
+ template ::IsStrongPersistent::value>::type>
+ BasicCrossThreadPersistent
+ Lock() const {
+ return BasicCrossThreadPersistent<
+ U, internal::StrongCrossThreadPersistentPolicy>(*this);
+ }
+
+ private:
+ static bool IsValid(const void* ptr) {
+ return ptr && ptr != kSentinelPointer;
+ }
+
+ static void TraceAsRoot(RootVisitor& root_visitor, const void* ptr) {
+ root_visitor.Trace(*static_cast(ptr));
+ }
+
+ void AssignUnsafe(T* ptr) {
+ const void* old_value = GetValue();
+ if (IsValid(old_value)) {
+ PersistentRegionLock guard;
+ old_value = GetValue();
+ // The fast path check (IsValid()) does not acquire the lock. Reload
+ // the value to ensure the reference has not been cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ if (IsValid(ptr) && (®ion == &this->GetPersistentRegion(ptr))) {
+ SetValue(ptr);
+ this->CheckPointer(ptr);
+ return;
+ }
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
+ }
+ }
+ SetValue(ptr);
+ if (!IsValid(ptr)) return;
+ PersistentRegionLock guard;
+ SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &TraceAsRoot));
+ this->CheckPointer(ptr);
+ }
+
+ void AssignSafe(PersistentRegionLock&, T* ptr) {
+ PersistentRegionLock::AssertLocked();
+ const void* old_value = GetValue();
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ if (IsValid(ptr) && (®ion == &this->GetPersistentRegion(ptr))) {
+ SetValue(ptr);
+ this->CheckPointer(ptr);
+ return;
+ }
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ }
+ SetValue(ptr);
+ if (!IsValid(ptr)) return;
+ SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &TraceAsRoot));
+ this->CheckPointer(ptr);
+ }
+
+ void ClearFromGC() const {
+ if (IsValid(GetValueFromGC())) {
+ WeaknessPolicy::GetPersistentRegion(GetValueFromGC())
+ .FreeNode(GetNodeFromGC());
+ CrossThreadPersistentBase::ClearFromGC();
+ }
+ }
+
+ // See Get() for details.
+ V8_CLANG_NO_SANITIZE("cfi-unrelated-cast")
+ T* GetFromGC() const {
+ return static_cast(const_cast(GetValueFromGC()));
+ }
+
+ friend class internal::RootVisitor;
+};
+
+template
+struct IsWeak<
+ BasicCrossThreadPersistent>
+ : std::true_type {};
+
+} // namespace internal
+
+namespace subtle {
+
+/**
+ * **DO NOT USE: Has known caveats, see below.**
+ *
+ * CrossThreadPersistent allows retaining objects from threads other than the
+ * thread the owning heap is operating on.
+ *
+ * Known caveats:
+ * - Does not protect the heap owning an object from terminating.
+ * - Reaching transitively through the graph is unsupported as objects may be
+ * moved concurrently on the thread owning the object.
+ */
+template
+using CrossThreadPersistent = internal::BasicCrossThreadPersistent<
+ T, internal::StrongCrossThreadPersistentPolicy>;
+
+/**
+ * **DO NOT USE: Has known caveats, see below.**
+ *
+ * CrossThreadPersistent allows weakly retaining objects from threads other than
+ * the thread the owning heap is operating on.
+ *
+ * Known caveats:
+ * - Does not protect the heap owning an object from terminating.
+ * - Reaching transitively through the graph is unsupported as objects may be
+ * moved concurrently on the thread owning the object.
+ */
+template
+using WeakCrossThreadPersistent = internal::BasicCrossThreadPersistent<
+ T, internal::WeakCrossThreadPersistentPolicy>;
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
diff --git a/deps/include/cppgc/custom-space.h b/deps/include/cppgc/custom-space.h
new file mode 100755
index 0000000..757c4fd
--- /dev/null
+++ b/deps/include/cppgc/custom-space.h
@@ -0,0 +1,97 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_CUSTOM_SPACE_H_
+#define INCLUDE_CPPGC_CUSTOM_SPACE_H_
+
+#include <stddef.h>
+
+namespace cppgc {
+
+/**
+ * Index identifying a custom space.
+ */
+struct CustomSpaceIndex {
+ constexpr CustomSpaceIndex(size_t value) : value(value) {} // NOLINT
+ size_t value;
+};
+
+/**
+ * Top-level base class for custom spaces. Users must inherit from CustomSpace
+ * below.
+ */
+class CustomSpaceBase {
+ public:
+ virtual ~CustomSpaceBase() = default;
+ virtual CustomSpaceIndex GetCustomSpaceIndex() const = 0;
+ virtual bool IsCompactable() const = 0;
+};
+
+/**
+ * Base class custom spaces should directly inherit from. The class inheriting
+ * from `CustomSpace` must define `kSpaceIndex` as unique space index. These
+ * indices need to form a sequence starting at 0.
+ *
+ * Example:
+ * \code
+ * class CustomSpace1 : public CustomSpace<CustomSpace1> {
+ * public:
+ * static constexpr CustomSpaceIndex kSpaceIndex = 0;
+ * };
+ * class CustomSpace2 : public CustomSpace<CustomSpace2> {
+ * public:
+ * static constexpr CustomSpaceIndex kSpaceIndex = 1;
+ * };
+ * \endcode
+ */
+template <typename ConcreteCustomSpace>
+class CustomSpace : public CustomSpaceBase {
+ public:
+ /**
+ * Compaction is only supported on spaces that manually manage slots
+ * recording.
+ */
+ static constexpr bool kSupportsCompaction = false;
+
+ CustomSpaceIndex GetCustomSpaceIndex() const final {
+ return ConcreteCustomSpace::kSpaceIndex;
+ }
+ bool IsCompactable() const final {
+ return ConcreteCustomSpace::kSupportsCompaction;
+ }
+};
+
+/**
+ * User-overridable trait that allows pinning types to custom spaces.
+ */
+template <typename T, typename = void>
+struct SpaceTrait {
+ using Space = void;
+};
+
+namespace internal {
+
+template <typename CustomSpace>
+struct IsAllocatedOnCompactableSpaceImpl {
+ static constexpr bool value = CustomSpace::kSupportsCompaction;
+};
+
+template <>
+struct IsAllocatedOnCompactableSpaceImpl<void> {
+ // Non-custom spaces are by default not compactable.
+ static constexpr bool value = false;
+};
+
+template <typename T>
+struct IsAllocatedOnCompactableSpace {
+ public:
+ static constexpr bool value =
+ IsAllocatedOnCompactableSpaceImpl<typename SpaceTrait<T>::Space>::value;
+};
+
+} // namespace internal
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_CUSTOM_SPACE_H_
diff --git a/deps/include/cppgc/default-platform.h b/deps/include/cppgc/default-platform.h
new file mode 100755
index 0000000..a27871c
--- /dev/null
+++ b/deps/include/cppgc/default-platform.h
@@ -0,0 +1,67 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_DEFAULT_PLATFORM_H_
+#define INCLUDE_CPPGC_DEFAULT_PLATFORM_H_
+
+#include <memory>
+
+#include "cppgc/platform.h"
+#include "libplatform/libplatform.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+/**
+ * Platform provided by cppgc. Uses V8's DefaultPlatform provided by
+ * libplatform internally. Exception: `GetForegroundTaskRunner()`, see below.
+ */
+class V8_EXPORT DefaultPlatform : public Platform {
+ public:
+ using IdleTaskSupport = v8::platform::IdleTaskSupport;
+ explicit DefaultPlatform(
+ int thread_pool_size = 0,
+ IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
+ std::unique_ptr<v8::TracingController> tracing_controller = {})
+ : v8_platform_(v8::platform::NewDefaultPlatform(
+ thread_pool_size, idle_task_support,
+ v8::platform::InProcessStackDumping::kDisabled,
+ std::move(tracing_controller))) {}
+
+ cppgc::PageAllocator* GetPageAllocator() override {
+ return v8_platform_->GetPageAllocator();
+ }
+
+ double MonotonicallyIncreasingTime() override {
+ return v8_platform_->MonotonicallyIncreasingTime();
+ }
+
+ std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() override {
+ // V8's default platform creates a new task runner when passed the
+ // `v8::Isolate` pointer the first time. For non-default platforms this will
+ // require getting the appropriate task runner.
+ return v8_platform_->GetForegroundTaskRunner(kNoIsolate);
+ }
+
+ std::unique_ptr<cppgc::JobHandle> PostJob(
+ cppgc::TaskPriority priority,
+ std::unique_ptr<cppgc::JobTask> job_task) override {
+ return v8_platform_->PostJob(priority, std::move(job_task));
+ }
+
+ TracingController* GetTracingController() override {
+ return v8_platform_->GetTracingController();
+ }
+
+ v8::Platform* GetV8Platform() const { return v8_platform_.get(); }
+
+ protected:
+ static constexpr v8::Isolate* kNoIsolate = nullptr;
+
+ std::unique_ptr<v8::Platform> v8_platform_;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_DEFAULT_PLATFORM_H_
diff --git a/deps/include/cppgc/ephemeron-pair.h b/deps/include/cppgc/ephemeron-pair.h
new file mode 100755
index 0000000..e16cf1f
--- /dev/null
+++ b/deps/include/cppgc/ephemeron-pair.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_EPHEMERON_PAIR_H_
+#define INCLUDE_CPPGC_EPHEMERON_PAIR_H_
+
+#include "cppgc/liveness-broker.h"
+#include "cppgc/member.h"
+
+namespace cppgc {
+
+/**
+ * An ephemeron pair is used to conditionally retain an object.
+ * The `value` will be kept alive only if the `key` is alive.
+ */
+template <typename K, typename V>
+struct EphemeronPair {
+ EphemeronPair(K* k, V* v) : key(k), value(v) {}
+ WeakMember<K> key;
+ Member<V> value;
+
+ void ClearValueIfKeyIsDead(const LivenessBroker& broker) {
+ if (!broker.IsHeapObjectAlive(key)) value = nullptr;
+ }
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_EPHEMERON_PAIR_H_
diff --git a/deps/include/cppgc/explicit-management.h b/deps/include/cppgc/explicit-management.h
new file mode 100755
index 0000000..0290328
--- /dev/null
+++ b/deps/include/cppgc/explicit-management.h
@@ -0,0 +1,100 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
+#define INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
+
+#include <cstddef>
+
+#include "cppgc/allocation.h"
+#include "cppgc/internal/logging.h"
+#include "cppgc/type-traits.h"
+
+namespace cppgc {
+
+class HeapHandle;
+
+namespace subtle {
+
+template <typename T>
+void FreeUnreferencedObject(HeapHandle& heap_handle, T& object);
+template <typename T>
+bool Resize(T& object, AdditionalBytes additional_bytes);
+
+} // namespace subtle
+
+namespace internal {
+
+class ExplicitManagementImpl final {
+ private:
+ V8_EXPORT static void FreeUnreferencedObject(HeapHandle&, void*);
+ V8_EXPORT static bool Resize(void*, size_t);
+
+ template <typename T>
+ friend void subtle::FreeUnreferencedObject(HeapHandle&, T&);
+ template <typename T>
+ friend bool subtle::Resize(T&, AdditionalBytes);
+};
+} // namespace internal
+
+namespace subtle {
+
+/**
+ * Informs the garbage collector that `object` can be immediately reclaimed. The
+ * destructor may not be invoked immediately but only on next garbage
+ * collection.
+ *
+ * It is up to the embedder to guarantee that no other object holds a reference
+ * to `object` after calling `FreeUnreferencedObject()`. In case such a
+ * reference exists, its use results in a use-after-free.
+ *
+ * To aid in using the API, `FreeUnreferencedObject()` may be called from
+ * destructors on objects that would be reclaimed in the same garbage collection
+ * cycle.
+ *
+ * \param heap_handle The corresponding heap.
+ * \param object Reference to an object that is of type `GarbageCollected` and
+ * should be immediately reclaimed.
+ */
+template <typename T>
+void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) {
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "Object must be of type GarbageCollected.");
+ internal::ExplicitManagementImpl::FreeUnreferencedObject(heap_handle,
+ &object);
+}
+
+/**
+ * Tries to resize `object` of type `T` with additional bytes on top of
+ * sizeof(T). Resizing is only useful with trailing inlined storage, see e.g.
+ * `MakeGarbageCollected(AllocationHandle&, AdditionalBytes)`.
+ *
+ * `Resize()` performs growing or shrinking as needed and may skip the operation
+ * for internal reasons, see return value.
+ *
+ * It is up to the embedder to guarantee that in case of shrinking a larger
+ * object down, the reclaimed area is not used anymore. Any subsequent use
+ * results in a use-after-free.
+ *
+ * The `object` must be live when calling `Resize()`.
+ *
+ * \param object Reference to an object that is of type `GarbageCollected` and
+ * should be resized.
+ * \param additional_bytes Bytes in addition to sizeof(T) that the object should
+ * provide.
+ * \returns true when the operation was successful and the result can be relied
+ * on, and false otherwise.
+ */
+template <typename T>
+bool Resize(T& object, AdditionalBytes additional_bytes) {
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "Object must be of type GarbageCollected.");
+ return internal::ExplicitManagementImpl::Resize(
+ &object, sizeof(T) + additional_bytes.value);
+}
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_EXPLICIT_MANAGEMENT_H_
diff --git a/deps/include/cppgc/garbage-collected.h b/deps/include/cppgc/garbage-collected.h
new file mode 100755
index 0000000..6737c8b
--- /dev/null
+++ b/deps/include/cppgc/garbage-collected.h
@@ -0,0 +1,106 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
+#define INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
+
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/platform.h"
+#include "cppgc/trace-trait.h"
+#include "cppgc/type-traits.h"
+
+namespace cppgc {
+
+class Visitor;
+
+/**
+ * Base class for managed objects. Only descendent types of `GarbageCollected`
+ * can be constructed using `MakeGarbageCollected()`. Must be inherited from as
+ * left-most base class.
+ *
+ * Types inheriting from GarbageCollected must provide a method of
+ * signature `void Trace(cppgc::Visitor*) const` that dispatches all managed
+ * pointers to the visitor and delegates to garbage-collected base classes.
+ * The method must be virtual if the type is not directly a child of
+ * GarbageCollected and marked as final.
+ *
+ * \code
+ * // Example using final class.
+ * class FinalType final : public GarbageCollected<FinalType> {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const {
+ * // Dispatch using visitor->Trace(...);
+ * }
+ * };
+ *
+ * // Example using non-final base class.
+ * class NonFinalBase : public GarbageCollected<NonFinalBase> {
+ * public:
+ * virtual void Trace(cppgc::Visitor*) const {}
+ * };
+ *
+ * class FinalChild final : public NonFinalBase {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const final {
+ * // Dispatch using visitor->Trace(...);
+ * NonFinalBase::Trace(visitor);
+ * }
+ * };
+ * \endcode
+ */
+template <typename T>
+class GarbageCollected {
+ public:
+ using IsGarbageCollectedTypeMarker = void;
+ using ParentMostGarbageCollectedType = T;
+
+ // Must use MakeGarbageCollected.
+ void* operator new(size_t) = delete;
+ void* operator new[](size_t) = delete;
+ // The garbage collector is taking care of reclaiming the object. Also,
+ // virtual destructor requires an unambiguous, accessible 'operator delete'.
+ void operator delete(void*) {
+#ifdef V8_ENABLE_CHECKS
+ internal::Fatal(
+ "Manually deleting a garbage collected object is not allowed");
+#endif // V8_ENABLE_CHECKS
+ }
+ void operator delete[](void*) = delete;
+
+ protected:
+ GarbageCollected() = default;
+};
+
+/**
+ * Base class for managed mixin objects. Such objects cannot be constructed
+ * directly but must be mixed into the inheritance hierarchy of a
+ * GarbageCollected object.
+ *
+ * Types inheriting from GarbageCollectedMixin must override a virtual method
+ * of signature `void Trace(cppgc::Visitor*) const` that dispatches all managed
+ * pointers to the visitor and delegates to base classes.
+ *
+ * \code
+ * class Mixin : public GarbageCollectedMixin {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const override {
+ * // Dispatch using visitor->Trace(...);
+ * }
+ * };
+ * \endcode
+ */
+class GarbageCollectedMixin {
+ public:
+ using IsGarbageCollectedMixinTypeMarker = void;
+
+ /**
+ * This Trace method must be overridden by objects inheriting from
+ * GarbageCollectedMixin.
+ */
+ virtual void Trace(cppgc::Visitor*) const {}
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
diff --git a/deps/include/cppgc/heap-consistency.h b/deps/include/cppgc/heap-consistency.h
new file mode 100755
index 0000000..35c59ed
--- /dev/null
+++ b/deps/include/cppgc/heap-consistency.h
@@ -0,0 +1,309 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_CONSISTENCY_H_
+#define INCLUDE_CPPGC_HEAP_CONSISTENCY_H_
+
+#include <cstddef>
+
+#include "cppgc/internal/write-barrier.h"
+#include "cppgc/macros.h"
+#include "cppgc/member.h"
+#include "cppgc/trace-trait.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class HeapHandle;
+
+namespace subtle {
+
+/**
+ * **DO NOT USE: Use the appropriate managed types.**
+ *
+ * Consistency helpers that aid in maintaining a consistent internal state of
+ * the garbage collector.
+ */
+class HeapConsistency final {
+ public:
+ using WriteBarrierParams = internal::WriteBarrier::Params;
+ using WriteBarrierType = internal::WriteBarrier::Type;
+
+ /**
+ * Gets the required write barrier type for a specific write.
+ *
+ * \param slot Slot containing the pointer to the object. The slot itself
+ * must reside in an object that has been allocated using
+ * `MakeGarbageCollected()`.
+ * \param value The pointer to the object. May be an interior pointer to an
+ * interface of the actual object.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ static V8_INLINE WriteBarrierType GetWriteBarrierType(
+ const void* slot, const void* value, WriteBarrierParams& params) {
+ return internal::WriteBarrier::GetWriteBarrierType(slot, value, params);
+ }
+
+ /**
+ * Gets the required write barrier type for a specific write. This override is
+ * only used for all the BasicMember types.
+ *
+ * \param slot Slot containing the pointer to the object. The slot itself
+ * must reside in an object that has been allocated using
+ * `MakeGarbageCollected()`.
+ * \param value The pointer to the object held via `BasicMember`.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ template <typename T, typename WeaknessTag, typename WriteBarrierPolicy, typename CheckingPolicy, typename StorageType>
+ static V8_INLINE WriteBarrierType GetWriteBarrierType(
+ const internal::BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy, StorageType>& value,
+ WriteBarrierParams& params) {
+ return internal::WriteBarrier::GetWriteBarrierType(
+ value.GetRawSlot(), value.GetRawStorage(), params);
+ }
+
+ /**
+ * Gets the required write barrier type for a specific write.
+ *
+ * \param slot Slot to some part of an object. The object must not necessarily
+ have been allocated using `MakeGarbageCollected()` but can also live
+ off-heap or on stack.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \param callback Callback returning the corresponding heap handle. The
+ * callback is only invoked if the heap cannot otherwise be figured out. The
+ * callback must not allocate.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrierType
+ GetWriteBarrierType(const void* slot, WriteBarrierParams& params,
+ HeapHandleCallback callback) {
+ return internal::WriteBarrier::GetWriteBarrierType(slot, params, callback);
+ }
+
+ /**
+ * Gets the required write barrier type for a specific write.
+ * This version is meant to be used in conjunction with a marking write
+ * barrier which doesn't consider the slot.
+ *
+ * \param value The pointer to the object. May be an interior pointer to an
+ * interface of the actual object.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ static V8_INLINE WriteBarrierType
+ GetWriteBarrierType(const void* value, WriteBarrierParams& params) {
+ return internal::WriteBarrier::GetWriteBarrierType(value, params);
+ }
+
+ /**
+ * Conservative Dijkstra-style write barrier that processes an object if it
+ * has not yet been processed.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param object The pointer to the object. May be an interior pointer to
+ * an interface of the actual object.
+ */
+ static V8_INLINE void DijkstraWriteBarrier(const WriteBarrierParams& params,
+ const void* object) {
+ internal::WriteBarrier::DijkstraMarkingBarrier(params, object);
+ }
+
+ /**
+ * Conservative Dijkstra-style write barrier that processes a range of
+ * elements if they have not yet been processed.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param first_element Pointer to the first element that should be processed.
+ * The slot itself must reside in an object that has been allocated using
+ * `MakeGarbageCollected()`.
+ * \param element_size Size of the element in bytes.
+ * \param number_of_elements Number of elements that should be processed,
+ * starting with `first_element`.
+ * \param trace_callback The trace callback that should be invoked for each
+ * element if necessary.
+ */
+ static V8_INLINE void DijkstraWriteBarrierRange(
+ const WriteBarrierParams& params, const void* first_element,
+ size_t element_size, size_t number_of_elements,
+ TraceCallback trace_callback) {
+ internal::WriteBarrier::DijkstraMarkingBarrierRange(
+ params, first_element, element_size, number_of_elements,
+ trace_callback);
+ }
+
+ /**
+ * Steele-style write barrier that re-processes an object if it has already
+ * been processed.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param object The pointer to the object which must point to an object that
+ * has been allocated using `MakeGarbageCollected()`. Interior pointers are
+ * not supported.
+ */
+ static V8_INLINE void SteeleWriteBarrier(const WriteBarrierParams& params,
+ const void* object) {
+ internal::WriteBarrier::SteeleMarkingBarrier(params, object);
+ }
+
+ /**
+ * Generational barrier for maintaining consistency when running with multiple
+ * generations.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param slot Slot containing the pointer to the object. The slot itself
+ * must reside in an object that has been allocated using
+ * `MakeGarbageCollected()`.
+ */
+ static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params,
+ const void* slot) {
+ internal::WriteBarrier::GenerationalBarrier<
+ internal::WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params,
+ slot);
+ }
+
+ /**
+ * Generational barrier for maintaining consistency when running with multiple
+ * generations. This version is used when slot contains uncompressed pointer.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param slot Uncompressed slot containing the direct pointer to the object.
+ * The slot itself must reside in an object that has been allocated using
+ * `MakeGarbageCollected()`.
+ */
+ static V8_INLINE void GenerationalBarrierForUncompressedSlot(
+ const WriteBarrierParams& params, const void* uncompressed_slot) {
+ internal::WriteBarrier::GenerationalBarrier<
+ internal::WriteBarrier::GenerationalBarrierType::
+ kPreciseUncompressedSlot>(params, uncompressed_slot);
+ }
+
+ /**
+ * Generational barrier for source object that may contain outgoing pointers
+ * to objects in young generation.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param inner_pointer Pointer to the source object.
+ */
+ static V8_INLINE void GenerationalBarrierForSourceObject(
+ const WriteBarrierParams& params, const void* inner_pointer) {
+ internal::WriteBarrier::GenerationalBarrier<
+ internal::WriteBarrier::GenerationalBarrierType::kImpreciseSlot>(
+ params, inner_pointer);
+ }
+
+ private:
+ HeapConsistency() = delete;
+};
+
+/**
+ * Disallows garbage collection finalizations. Any garbage collection triggers
+ * result in a crash when in this scope.
+ *
+ * Note that the garbage collector already covers paths that can lead to garbage
+ * collections, so user code does not require checking
+ * `IsGarbageCollectionAllowed()` before allocations.
+ */
+class V8_EXPORT V8_NODISCARD DisallowGarbageCollectionScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ /**
+ * \returns whether garbage collections are currently allowed.
+ */
+ static bool IsGarbageCollectionAllowed(HeapHandle& heap_handle);
+
+ /**
+ * Enters a disallow garbage collection scope. Must be paired with `Leave()`.
+ * Prefer a scope instance of `DisallowGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Enter(HeapHandle& heap_handle);
+
+ /**
+ * Leaves a disallow garbage collection scope. Must be paired with `Enter()`.
+ * Prefer a scope instance of `DisallowGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Leave(HeapHandle& heap_handle);
+
+ /**
+ * Constructs a scoped object that automatically enters and leaves a disallow
+ * garbage collection scope based on its lifetime.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ explicit DisallowGarbageCollectionScope(HeapHandle& heap_handle);
+ ~DisallowGarbageCollectionScope();
+
+ DisallowGarbageCollectionScope(const DisallowGarbageCollectionScope&) =
+ delete;
+ DisallowGarbageCollectionScope& operator=(
+ const DisallowGarbageCollectionScope&) = delete;
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
+/**
+ * Avoids invoking garbage collection finalizations. Already running garbage
+ * collection phase are unaffected by this scope.
+ *
+ * Should only be used temporarily as the scope has an impact on memory usage
+ * and follow up garbage collections.
+ */
+class V8_EXPORT V8_NODISCARD NoGarbageCollectionScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ /**
+ * Enters a no garbage collection scope. Must be paired with `Leave()`. Prefer
+ * a scope instance of `NoGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Enter(HeapHandle& heap_handle);
+
+ /**
+ * Leaves a no garbage collection scope. Must be paired with `Enter()`. Prefer
+ * a scope instance of `NoGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Leave(HeapHandle& heap_handle);
+
+ /**
+ * Constructs a scoped object that automatically enters and leaves a no
+ * garbage collection scope based on its lifetime.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ explicit NoGarbageCollectionScope(HeapHandle& heap_handle);
+ ~NoGarbageCollectionScope();
+
+ NoGarbageCollectionScope(const NoGarbageCollectionScope&) = delete;
+ NoGarbageCollectionScope& operator=(const NoGarbageCollectionScope&) = delete;
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_CONSISTENCY_H_
diff --git a/deps/include/cppgc/heap-handle.h b/deps/include/cppgc/heap-handle.h
new file mode 100755
index 0000000..0d1d21e
--- /dev/null
+++ b/deps/include/cppgc/heap-handle.h
@@ -0,0 +1,48 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_HANDLE_H_
+#define INCLUDE_CPPGC_HEAP_HANDLE_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+namespace internal {
+class HeapBase;
+class WriteBarrierTypeForCagedHeapPolicy;
+class WriteBarrierTypeForNonCagedHeapPolicy;
+} // namespace internal
+
+/**
+ * Opaque handle used for additional heap APIs.
+ */
+class HeapHandle {
+ public:
+ // Deleted copy ctor to avoid treating the type by value.
+ HeapHandle(const HeapHandle&) = delete;
+ HeapHandle& operator=(const HeapHandle&) = delete;
+
+ private:
+ HeapHandle() = default;
+
+ V8_INLINE bool is_incremental_marking_in_progress() const {
+ return is_incremental_marking_in_progress_;
+ }
+
+ V8_INLINE bool is_young_generation_enabled() const {
+ return is_young_generation_enabled_;
+ }
+
+ bool is_incremental_marking_in_progress_ = false;
+ bool is_young_generation_enabled_ = false;
+
+ friend class internal::HeapBase;
+ friend class internal::WriteBarrierTypeForCagedHeapPolicy;
+ friend class internal::WriteBarrierTypeForNonCagedHeapPolicy;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_HANDLE_H_
diff --git a/deps/include/cppgc/heap-state.h b/deps/include/cppgc/heap-state.h
new file mode 100755
index 0000000..2821258
--- /dev/null
+++ b/deps/include/cppgc/heap-state.h
@@ -0,0 +1,82 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_STATE_H_
+#define INCLUDE_CPPGC_HEAP_STATE_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class HeapHandle;
+
+namespace subtle {
+
+/**
+ * Helpers to peek into heap-internal state.
+ */
+class V8_EXPORT HeapState final {
+ public:
+ /**
+ * Returns whether the garbage collector is marking. This API is experimental
+ * and is expected to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently marking, and false
+ * otherwise.
+ */
+ static bool IsMarking(const HeapHandle& heap_handle);
+
+ /*
+ * Returns whether the garbage collector is sweeping. This API is experimental
+ * and is expected to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently sweeping, and false
+ * otherwise.
+ */
+ static bool IsSweeping(const HeapHandle& heap_handle);
+
+ /*
+ * Returns whether the garbage collector is currently sweeping on the thread
+ * owning this heap. This API allows the caller to determine whether it has
+ * been called from a destructor of a managed object. This API is experimental
+ * and may be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently sweeping on this
+ * thread, and false otherwise.
+ */
+ static bool IsSweepingOnOwningThread(const HeapHandle& heap_handle);
+
+ /**
+ * Returns whether the garbage collector is in the atomic pause, i.e., the
+ * mutator is stopped from running. This API is experimental and is expected
+ * to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently in the atomic pause,
+ * and false otherwise.
+ */
+ static bool IsInAtomicPause(const HeapHandle& heap_handle);
+
+ /**
+ * Returns whether the last garbage collection was finalized conservatively
+ * (i.e., with a non-empty stack). This API is experimental and is expected to
+ * be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the last garbage collection was finalized conservatively,
+ * and false otherwise.
+ */
+ static bool PreviousGCWasConservative(const HeapHandle& heap_handle);
+
+ private:
+ HeapState() = delete;
+};
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_STATE_H_
diff --git a/deps/include/cppgc/heap-statistics.h b/deps/include/cppgc/heap-statistics.h
new file mode 100755
index 0000000..5e38987
--- /dev/null
+++ b/deps/include/cppgc/heap-statistics.h
@@ -0,0 +1,120 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_STATISTICS_H_
+#define INCLUDE_CPPGC_HEAP_STATISTICS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace cppgc {
+
+/**
+ * `HeapStatistics` contains memory consumption and utilization statistics for a
+ * cppgc heap.
+ */
+struct HeapStatistics final {
+ /**
+ * Specifies the detail level of the heap statistics. Brief statistics contain
+ * only the top-level allocated and used memory statistics for the entire
+ * heap. Detailed statistics also contain a break down per space and page, as
+ * well as freelist statistics and object type histograms. Note that used
+ * memory reported by brief statistics and detailed statistics might differ
+ * slightly.
+ */
+ enum DetailLevel : uint8_t {
+ kBrief,
+ kDetailed,
+ };
+
+ /**
+ * Object statistics for a single type.
+ */
+ struct ObjectStatsEntry {
+ /**
+ * Number of allocated bytes.
+ */
+ size_t allocated_bytes;
+ /**
+ * Number of allocated objects.
+ */
+ size_t object_count;
+ };
+
+ /**
+ * Page granularity statistics. For each page the statistics record the
+ * allocated memory size and overall used memory size for the page.
+ */
+ struct PageStatistics {
+ /** Overall committed amount of memory for the page. */
+ size_t committed_size_bytes = 0;
+ /** Resident amount of memory held by the page. */
+ size_t resident_size_bytes = 0;
+ /** Amount of memory actually used on the page. */
+ size_t used_size_bytes = 0;
+ /** Statistics for object allocated on the page. Filled only when
+ * NameProvider::SupportsCppClassNamesAsObjectNames() is true. */
+ std::vector<ObjectStatsEntry> object_statistics;
+ };
+
+ /**
+ * Statistics of the freelist (used only in non-large object spaces). For
+ * each bucket in the freelist the statistics record the bucket size, the
+ * number of freelist entries in the bucket, and the overall allocated memory
+ * consumed by these freelist entries.
+ */
+ struct FreeListStatistics {
+ /** bucket sizes in the freelist. */
+ std::vector<size_t> bucket_size;
+ /** number of freelist entries per bucket. */
+ std::vector<size_t> free_count;
+ /** memory size consumed by freelist entries per size. */
+ std::vector<size_t> free_size;
+ };
+
+ /**
+ * Space granularity statistics. For each space the statistics record the
+ * space name, the amount of allocated memory and overall used memory for the
+ * space. The statistics also contain statistics for each of the space's
+ * pages, its freelist and the objects allocated on the space.
+ */
+ struct SpaceStatistics {
+ /** The space name */
+ std::string name;
+ /** Overall committed amount of memory for the heap. */
+ size_t committed_size_bytes = 0;
+ /** Resident amount of memory held by the heap. */
+ size_t resident_size_bytes = 0;
+ /** Amount of memory actually used on the space. */
+ size_t used_size_bytes = 0;
+ /** Statistics for each of the pages in the space. */
+ std::vector<PageStatistics> page_stats;
+ /** Statistics for the freelist of the space. */
+ FreeListStatistics free_list_stats;
+ };
+
+ /** Overall committed amount of memory for the heap. */
+ size_t committed_size_bytes = 0;
+ /** Resident amount of memory held by the heap. */
+ size_t resident_size_bytes = 0;
+ /** Amount of memory actually used on the heap. */
+ size_t used_size_bytes = 0;
+ /** Detail level of this HeapStatistics. */
+ DetailLevel detail_level;
+
+ /** Statistics for each of the spaces in the heap. Filled only when
+ * `detail_level` is `DetailLevel::kDetailed`. */
+ std::vector<SpaceStatistics> space_stats;
+
+ /**
+ * Vector of `cppgc::GarbageCollected` type names.
+ */
+ std::vector<std::string> type_names;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_STATISTICS_H_
diff --git a/deps/include/cppgc/heap.h b/deps/include/cppgc/heap.h
new file mode 100755
index 0000000..02ee12e
--- /dev/null
+++ b/deps/include/cppgc/heap.h
@@ -0,0 +1,202 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_H_
+#define INCLUDE_CPPGC_HEAP_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "cppgc/common.h"
+#include "cppgc/custom-space.h"
+#include "cppgc/platform.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+/**
+ * cppgc - A C++ garbage collection library.
+ */
+namespace cppgc {
+
+class AllocationHandle;
+class HeapHandle;
+
+/**
+ * Implementation details of cppgc. Those details are considered internal and
+ * may change at any point in time without notice. Users should never rely on
+ * the contents of this namespace.
+ */
+namespace internal {
+class Heap;
+} // namespace internal
+
+class V8_EXPORT Heap {
+ public:
+ /**
+ * Specifies the stack state the embedder is in.
+ */
+ using StackState = EmbedderStackState;
+
+ /**
+ * Specifies whether conservative stack scanning is supported.
+ */
+ enum class StackSupport : uint8_t {
+ /**
+ * Conservative stack scan is supported.
+ */
+ kSupportsConservativeStackScan,
+ /**
+ * Conservative stack scan is not supported. Embedders may use this option
+ * when using custom infrastructure that is unsupported by the library.
+ */
+ kNoConservativeStackScan,
+ };
+
+ /**
+ * Specifies supported marking types.
+ */
+ enum class MarkingType : uint8_t {
+ /**
+ * Atomic stop-the-world marking. This option does not require any write
+ * barriers but is the most intrusive in terms of jank.
+ */
+ kAtomic,
+ /**
+ * Incremental marking interleaves marking with the rest of the application
+ * workload on the same thread.
+ */
+ kIncremental,
+ /**
+ * Incremental and concurrent marking.
+ */
+ kIncrementalAndConcurrent
+ };
+
+ /**
+ * Specifies supported sweeping types.
+ */
+ enum class SweepingType : uint8_t {
+ /**
+ * Atomic stop-the-world sweeping. All of sweeping is performed at once.
+ */
+ kAtomic,
+ /**
+ * Incremental sweeping interleaves sweeping with the rest of the
+ * application workload on the same thread.
+ */
+ kIncremental,
+ /**
+ * Incremental and concurrent sweeping. Sweeping is split and interleaved
+ * with the rest of the application.
+ */
+ kIncrementalAndConcurrent
+ };
+
+ /**
+ * Constraints for a Heap setup.
+ */
+ struct ResourceConstraints {
+ /**
+ * Allows the heap to grow to some initial size in bytes before triggering
+ * garbage collections. This is useful when it is known that applications
+ * need a certain minimum heap to run to avoid repeatedly invoking the
+ * garbage collector when growing the heap.
+ */
+ size_t initial_heap_size_bytes = 0;
+ };
+
+ /**
+ * Options specifying Heap properties (e.g. custom spaces) when initializing a
+ * heap through `Heap::Create()`.
+ */
+ struct HeapOptions {
+ /**
+ * Creates reasonable defaults for instantiating a Heap.
+ *
+ * \returns the HeapOptions that can be passed to `Heap::Create()`.
+ */
+ static HeapOptions Default() { return {}; }
+
+ /**
+ * Custom spaces added to heap are required to have indices forming a
+ * numbered sequence starting at 0, i.e., their `kSpaceIndex` must
+ * correspond to the index they reside in the vector.
+ */
+ std::vector<std::unique_ptr<CustomSpaceBase>> custom_spaces;
+
+ /**
+ * Specifies whether conservative stack scan is supported. When conservative
+ * stack scan is not supported, the collector may try to invoke
+ * garbage collections using non-nestable task, which are guaranteed to have
+ * no interesting stack, through the provided Platform. If such tasks are
+ * not supported by the Platform, the embedder must take care of invoking
+ * the GC through `ForceGarbageCollectionSlow()`.
+ */
+ StackSupport stack_support = StackSupport::kSupportsConservativeStackScan;
+
+ /**
+ * Specifies which types of marking are supported by the heap.
+ */
+ MarkingType marking_support = MarkingType::kIncrementalAndConcurrent;
+
+ /**
+ * Specifies which types of sweeping are supported by the heap.
+ */
+ SweepingType sweeping_support = SweepingType::kIncrementalAndConcurrent;
+
+ /**
+ * Resource constraints specifying various properties that the internal
+ * GC scheduler follows.
+ */
+ ResourceConstraints resource_constraints;
+ };
+
+ /**
+ * Creates a new heap that can be used for object allocation.
+ *
+ * \param platform implemented and provided by the embedder.
+ * \param options HeapOptions specifying various properties for the Heap.
+ * \returns a new Heap instance.
+ */
+ static std::unique_ptr<Heap> Create(
+ std::shared_ptr<cppgc::Platform> platform,
+ HeapOptions options = HeapOptions::Default());
+
+ virtual ~Heap() = default;
+
+ /**
+ * Forces garbage collection.
+ *
+ * \param source String specifying the source (or caller) triggering a
+ * forced garbage collection.
+ * \param reason String specifying the reason for the forced garbage
+ * collection.
+ * \param stack_state The embedder stack state, see StackState.
+ */
+ void ForceGarbageCollectionSlow(
+ const char* source, const char* reason,
+ StackState stack_state = StackState::kMayContainHeapPointers);
+
+ /**
+ * \returns the opaque handle for allocating objects using
+ * `MakeGarbageCollected()`.
+ */
+ AllocationHandle& GetAllocationHandle();
+
+ /**
+ * \returns the opaque heap handle which may be used to refer to this heap in
+ * other APIs. Valid as long as the underlying `Heap` is alive.
+ */
+ HeapHandle& GetHeapHandle();
+
+ private:
+ Heap() = default;
+
+ friend class internal::Heap;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_H_
diff --git a/deps/include/cppgc/internal/api-constants.h b/deps/include/cppgc/internal/api-constants.h
new file mode 100755
index 0000000..023426e
--- /dev/null
+++ b/deps/include/cppgc/internal/api-constants.h
@@ -0,0 +1,65 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
+#define INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+// Embedders should not rely on this code!
+
+// Internal constants to avoid exposing internal types on the API surface.
+namespace api_constants {
+
+constexpr size_t kKB = 1024;
+constexpr size_t kMB = kKB * 1024;
+constexpr size_t kGB = kMB * 1024;
+
+// Offset of the uint16_t bitfield from the payload containing the
+// in-construction bit. This is subtracted from the payload pointer to get
+// to the right bitfield.
+static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload =
+ 2 * sizeof(uint16_t);
+// Mask for in-construction bit.
+static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1};
+
+static constexpr size_t kPageSize = size_t{1} << 17;
+
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+constexpr size_t kGuardPageSize = 0;
+#else
+constexpr size_t kGuardPageSize = 4096;
+#endif
+
+static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
+
+#if defined(CPPGC_CAGED_HEAP)
+#if defined(CPPGC_2GB_CAGE)
+constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
+#else // !defined(CPPGC_2GB_CAGE)
+constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
+#endif // !defined(CPPGC_2GB_CAGE)
+constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+#endif // defined(CPPGC_CAGED_HEAP)
+
+static constexpr size_t kDefaultAlignment = sizeof(void*);
+
+// Maximum supported alignment for a type as in `alignof(T)`.
+static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
+
+// Granularity of heap allocations.
+constexpr size_t kAllocationGranularity = sizeof(void*);
+
+} // namespace api_constants
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
diff --git a/deps/include/cppgc/internal/atomic-entry-flag.h b/deps/include/cppgc/internal/atomic-entry-flag.h
new file mode 100755
index 0000000..5a7d3b8
--- /dev/null
+++ b/deps/include/cppgc/internal/atomic-entry-flag.h
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
+#define INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
+
+#include <atomic>
+
+namespace cppgc {
+namespace internal {
+
+// A flag which provides a fast check whether a scope may be entered on the
+// current thread, without needing to access thread-local storage or mutex. Can
+// have false positives (i.e., spuriously report that it might be entered), so
+// it is expected that this will be used in tandem with a precise check that the
+// scope is in fact entered on that thread.
+//
+// Example:
+// g_frobnicating_flag.MightBeEntered() &&
+// ThreadLocalFrobnicator().IsFrobnicating()
+//
+// Relaxed atomic operations are sufficient, since:
+// - all accesses remain atomic
+// - each thread must observe its own operations in order
+// - no thread ever exits the flag more times than it enters (if used correctly)
+// And so if a thread observes zero, it must be because it has observed an equal
+// number of exits as entries.
+class AtomicEntryFlag final {
+ public:
+ void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
+ void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
+
+ // Returns false only if the current thread is not between a call to Enter
+ // and a call to Exit. Returns true if this thread or another thread may
+ // currently be in the scope guarded by this flag.
+ bool MightBeEntered() const {
+ return entries_.load(std::memory_order_relaxed) != 0;
+ }
+
+ private:
+ std::atomic_int entries_{0};
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
diff --git a/deps/include/cppgc/internal/base-page-handle.h b/deps/include/cppgc/internal/base-page-handle.h
new file mode 100755
index 0000000..9c69075
--- /dev/null
+++ b/deps/include/cppgc/internal/base-page-handle.h
@@ -0,0 +1,45 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
+#define INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
+
+#include "cppgc/heap-handle.h"
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/logging.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+// The class is needed in the header to allow for fast access to HeapHandle in
+// the write barrier.
+class BasePageHandle {
+ public:
+ static V8_INLINE BasePageHandle* FromPayload(void* payload) {
+ return reinterpret_cast<BasePageHandle*>(
+ (reinterpret_cast<uintptr_t>(payload) &
+ ~(api_constants::kPageSize - 1)) +
+ api_constants::kGuardPageSize);
+ }
+ static V8_INLINE const BasePageHandle* FromPayload(const void* payload) {
+ return FromPayload(const_cast<void*>(payload));
+ }
+
+ HeapHandle& heap_handle() { return heap_handle_; }
+ const HeapHandle& heap_handle() const { return heap_handle_; }
+
+ protected:
+ explicit BasePageHandle(HeapHandle& heap_handle) : heap_handle_(heap_handle) {
+ CPPGC_DCHECK(reinterpret_cast<uintptr_t>(this) % api_constants::kPageSize ==
+ api_constants::kGuardPageSize);
+ }
+
+ HeapHandle& heap_handle_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
diff --git a/deps/include/cppgc/internal/caged-heap-local-data.h b/deps/include/cppgc/internal/caged-heap-local-data.h
new file mode 100755
index 0000000..7d689f8
--- /dev/null
+++ b/deps/include/cppgc/internal/caged-heap-local-data.h
@@ -0,0 +1,111 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
+#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/caged-heap.h"
+#include "cppgc/internal/logging.h"
+#include "cppgc/platform.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if __cpp_lib_bitopts
+#include <bit>
+#endif // __cpp_lib_bitopts
+
+#if defined(CPPGC_CAGED_HEAP)
+
+namespace cppgc {
+namespace internal {
+
+class HeapBase;
+class HeapBaseHandle;
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+// AgeTable is the bytemap needed for the fast generation check in the write
+// barrier. AgeTable contains entries that correspond to 4096 bytes memory
+// regions (cards). Each entry in the table represents generation of the objects
+// that reside on the corresponding card (young, old or mixed).
+class V8_EXPORT AgeTable final {
+ static constexpr size_t kRequiredSize = 1 * api_constants::kMB;
+ static constexpr size_t kAllocationGranularity =
+ api_constants::kAllocationGranularity;
+
+ public:
+ // Represents age of the objects living on a single card.
+ enum class Age : uint8_t { kOld, kYoung, kMixed };
+ // When setting age for a range, consider or ignore ages of the adjacent
+ // cards.
+ enum class AdjacentCardsPolicy : uint8_t { kConsider, kIgnore };
+
+ static constexpr size_t kCardSizeInBytes =
+ api_constants::kCagedHeapReservationSize / kRequiredSize;
+
+ void SetAge(uintptr_t cage_offset, Age age) {
+ table_[card(cage_offset)] = age;
+ }
+
+ V8_INLINE Age GetAge(uintptr_t cage_offset) const {
+ return table_[card(cage_offset)];
+ }
+
+ void SetAgeForRange(uintptr_t cage_offset_begin, uintptr_t cage_offset_end,
+ Age age, AdjacentCardsPolicy adjacent_cards_policy);
+
+ Age GetAgeForRange(uintptr_t cage_offset_begin,
+ uintptr_t cage_offset_end) const;
+
+ void ResetForTesting();
+
+ private:
+ V8_INLINE size_t card(uintptr_t offset) const {
+ constexpr size_t kGranularityBits =
+#if __cpp_lib_bitopts
+ std::countr_zero(static_cast<uint32_t>(kCardSizeInBytes));
+#elif V8_HAS_BUILTIN_CTZ
+ __builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
+#else //! V8_HAS_BUILTIN_CTZ
+ // Hardcode and check with assert.
+#if defined(CPPGC_2GB_CAGE)
+ 11;
+#else // !defined(CPPGC_2GB_CAGE)
+ 12;
+#endif // !defined(CPPGC_2GB_CAGE)
+#endif // !V8_HAS_BUILTIN_CTZ
+ static_assert((1 << kGranularityBits) == kCardSizeInBytes);
+ const size_t entry = offset >> kGranularityBits;
+ CPPGC_DCHECK(table_.size() > entry);
+ return entry;
+ }
+
+ std::array<Age, kRequiredSize> table_;
+};
+
+static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
+ "Size of AgeTable is 1MB");
+
+#endif // CPPGC_YOUNG_GENERATION
+
+struct CagedHeapLocalData final {
+ V8_INLINE static CagedHeapLocalData& Get() {
+ return *reinterpret_cast<CagedHeapLocalData*>(CagedHeapBase::GetBase());
+ }
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ AgeTable age_table;
+#endif
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // defined(CPPGC_CAGED_HEAP)
+
+#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
diff --git a/deps/include/cppgc/internal/caged-heap.h b/deps/include/cppgc/internal/caged-heap.h
new file mode 100755
index 0000000..4db42ae
--- /dev/null
+++ b/deps/include/cppgc/internal/caged-heap.h
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
+#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
+
+#include <climits>
+#include <cstddef>
+
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/base-page-handle.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if defined(CPPGC_CAGED_HEAP)
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT CagedHeapBase {
+ public:
+ V8_INLINE static uintptr_t OffsetFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (api_constants::kCagedHeapReservationAlignment - 1);
+ }
+
+ V8_INLINE static bool IsWithinCage(const void* address) {
+ CPPGC_DCHECK(g_heap_base_);
+ return (reinterpret_cast<uintptr_t>(address) &
+ ~(api_constants::kCagedHeapReservationAlignment - 1)) ==
+ g_heap_base_;
+ }
+
+ V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) {
+#if defined(CPPGC_2GB_CAGE)
+ static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT - 1;
+#else //! defined(CPPGC_2GB_CAGE)
+ static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT;
+#endif //! defined(CPPGC_2GB_CAGE)
+ static_assert((static_cast<uintptr_t>(1) << kHalfWordShift) ==
+ api_constants::kCagedHeapReservationSize);
+ CPPGC_DCHECK(g_heap_base_);
+ return !(((reinterpret_cast<uintptr_t>(addr1) ^ g_heap_base_) |
+ (reinterpret_cast<uintptr_t>(addr2) ^ g_heap_base_)) >>
+ kHalfWordShift);
+ }
+
+ V8_INLINE static uintptr_t GetBase() { return g_heap_base_; }
+
+ private:
+ friend class CagedHeap;
+
+ static uintptr_t g_heap_base_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // defined(CPPGC_CAGED_HEAP)
+
+#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
diff --git a/deps/include/cppgc/internal/compiler-specific.h b/deps/include/cppgc/internal/compiler-specific.h
new file mode 100755
index 0000000..595b639
--- /dev/null
+++ b/deps/include/cppgc/internal/compiler-specific.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
+#define INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
+
+namespace cppgc {
+
+#if defined(__has_attribute)
+#define CPPGC_HAS_ATTRIBUTE(FEATURE) __has_attribute(FEATURE)
+#else
+#define CPPGC_HAS_ATTRIBUTE(FEATURE) 0
+#endif
+
+#if defined(__has_cpp_attribute)
+#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) __has_cpp_attribute(FEATURE)
+#else
+#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) 0
+#endif
+
+// [[no_unique_address]] comes in C++20 but supported in clang with -std >=
+// c++11.
+#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address)
+#define CPPGC_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define CPPGC_NO_UNIQUE_ADDRESS
+#endif
+
+#if CPPGC_HAS_ATTRIBUTE(unused)
+#define CPPGC_UNUSED __attribute__((unused))
+#else
+#define CPPGC_UNUSED
+#endif
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
diff --git a/deps/include/cppgc/internal/finalizer-trait.h b/deps/include/cppgc/internal/finalizer-trait.h
new file mode 100755
index 0000000..ab49af8
--- /dev/null
+++ b/deps/include/cppgc/internal/finalizer-trait.h
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
+#define INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
+
+#include <type_traits>
+
+#include "cppgc/type-traits.h"
+
+namespace cppgc {
+namespace internal {
+
+using FinalizationCallback = void (*)(void*);
+
+template <typename T, typename = void>
+struct HasFinalizeGarbageCollectedObject : std::false_type {};
+
+template <typename T>
+struct HasFinalizeGarbageCollectedObject<
+ T,
+ std::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
+ : std::true_type {};
+
+// The FinalizerTraitImpl specifies how to finalize objects.
+template <typename T, bool isFinalized>
+struct FinalizerTraitImpl;
+
+template <typename T>
+struct FinalizerTraitImpl<T, true> {
+ private:
+ // Dispatch to custom FinalizeGarbageCollectedObject().
+ struct Custom {
+ static void Call(void* obj) {
+ static_cast<T*>(obj)->FinalizeGarbageCollectedObject();
+ }
+ };
+
+ // Dispatch to regular destructor.
+ struct Destructor {
+ static void Call(void* obj) { static_cast<T*>(obj)->~T(); }
+ };
+
+ using FinalizeImpl =
+ std::conditional_t<HasFinalizeGarbageCollectedObject<T>::value, Custom,
+ Destructor>;
+
+ public:
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ FinalizeImpl::Call(obj);
+ }
+};
+
+template <typename T>
+struct FinalizerTraitImpl<T, false> {
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ }
+};
+
+// The FinalizerTrait is used to determine if a type requires finalization and
+// what finalization means.
+template <typename T>
+struct FinalizerTrait {
+ private:
+ // Object has a finalizer if it has
+ // - a custom FinalizeGarbageCollectedObject method, or
+ // - a destructor.
+ static constexpr bool kNonTrivialFinalizer =
+ internal::HasFinalizeGarbageCollectedObject<T>::value ||
+ !std::is_trivially_destructible<typename std::remove_cv<T>::type>::value;
+
+ static void Finalize(void* obj) {
+ internal::FinalizerTraitImpl<T, kNonTrivialFinalizer>::Finalize(obj);
+ }
+
+ public:
+ static constexpr bool HasFinalizer() { return kNonTrivialFinalizer; }
+
+ // The callback used to finalize an object of type T.
+ static constexpr FinalizationCallback kCallback =
+ kNonTrivialFinalizer ? Finalize : nullptr;
+};
+
+template <typename T>
+constexpr FinalizationCallback FinalizerTrait<T>::kCallback;
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
diff --git a/deps/include/cppgc/internal/gc-info.h b/deps/include/cppgc/internal/gc-info.h
new file mode 100755
index 0000000..e8f90fe
--- /dev/null
+++ b/deps/include/cppgc/internal/gc-info.h
@@ -0,0 +1,155 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
+#define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
+
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+
+#include "cppgc/internal/finalizer-trait.h"
+#include "cppgc/internal/name-trait.h"
+#include "cppgc/trace-trait.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+using GCInfoIndex = uint16_t;
+
+struct V8_EXPORT EnsureGCInfoIndexTrait final {
+ // Acquires a new GC info object and returns the index. In addition, also
+ // updates `registered_index` atomically.
+ template <typename T>
+ V8_INLINE static GCInfoIndex EnsureIndex(
+ std::atomic<GCInfoIndex>& registered_index) {
+ return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
+ }
+
+ private:
+ template <typename T, bool = std::is_polymorphic<T>::value,
+ bool = FinalizerTrait<T>::HasFinalizer(),
+ bool = NameTrait<T>::HasNonHiddenName()>
+ struct EnsureGCInfoIndexTraitDispatch;
+
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback, NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+};
+
+#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
+ template <typename T> \
+ struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
+ T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
+ V8_INLINE GCInfoIndex \
+ operator()(std::atomic<GCInfoIndex>& registered_index) { \
+ return function; \
+ } \
+ };
+
+// --------------------------------------------------------------------- //
+// DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function)
+// --------------------------------------------------------------------- //
+DISPATCH(true, true, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, true, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(true, false, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, false, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+DISPATCH(false, true, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, true, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(false, false, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, false, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+
+#undef DISPATCH
+
+// Fold types based on finalizer behavior. Note that finalizer characteristics
+// align with trace behavior, i.e., destructors are virtual when trace methods
+// are and vice versa.
+template <typename T, typename ParentMostGarbageCollectedType>
+struct GCInfoFolding {
+ static constexpr bool kHasVirtualDestructorAtBase =
+ std::has_virtual_destructor<ParentMostGarbageCollectedType>::value;
+ static constexpr bool kBothTypesAreTriviallyDestructible =
+ std::is_trivially_destructible<ParentMostGarbageCollectedType>::value &&
+ std::is_trivially_destructible<T>::value;
+ static constexpr bool kHasCustomFinalizerDispatchAtBase =
+ internal::HasFinalizeGarbageCollectedObject<
+ ParentMostGarbageCollectedType>::value;
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+ static constexpr bool kWantsDetailedObjectNames = true;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ static constexpr bool kWantsDetailedObjectNames = false;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+ // Folding would regress name resolution when deriving names from C++
+ // class names as it would just fold a name to the base class name.
+ using ResultType = std::conditional_t<(kHasVirtualDestructorAtBase ||
+ kBothTypesAreTriviallyDestructible ||
+ kHasCustomFinalizerDispatchAtBase) &&
+ !kWantsDetailedObjectNames,
+ ParentMostGarbageCollectedType, T>;
+};
+
+// Trait determines how the garbage collector treats objects wrt. to traversing,
+// finalization, and naming.
+template <typename T>
+struct GCInfoTrait final {
+ V8_INLINE static GCInfoIndex Index() {
+ static_assert(sizeof(T), "T must be fully defined");
+ static std::atomic<GCInfoIndex>
+ registered_index; // Uses zero initialization.
+ const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
+ return index ? index
+ : EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
diff --git a/deps/include/cppgc/internal/logging.h b/deps/include/cppgc/internal/logging.h
new file mode 100755
index 0000000..3a279fe
--- /dev/null
+++ b/deps/include/cppgc/internal/logging.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_LOGGING_H_
+#define INCLUDE_CPPGC_INTERNAL_LOGGING_H_
+
+#include "cppgc/source-location.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+void V8_EXPORT DCheckImpl(const char*,
+ const SourceLocation& = SourceLocation::Current());
+[[noreturn]] void V8_EXPORT
+FatalImpl(const char*, const SourceLocation& = SourceLocation::Current());
+
+// Used to ignore -Wunused-variable.
+template <typename>
+struct EatParams {};
+
+#if defined(DEBUG)
+#define CPPGC_DCHECK_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ ::cppgc::internal::DCheckImpl(message); \
+ } \
+ } while (false)
+#else // !defined(DEBUG)
+#define CPPGC_DCHECK_MSG(condition, message) \
+ (static_cast<void>(::cppgc::internal::EatParams<decltype(static_cast<void>(condition), message)>{}))
+#endif // !defined(DEBUG)
+
+#define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition)
+
+#define CPPGC_CHECK_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ ::cppgc::internal::FatalImpl(message); \
+ } \
+ } while (false)
+
+#define CPPGC_CHECK(condition) CPPGC_CHECK_MSG(condition, #condition)
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_LOGGING_H_
diff --git a/deps/include/cppgc/internal/member-storage.h b/deps/include/cppgc/internal/member-storage.h
new file mode 100755
index 0000000..0eb6382
--- /dev/null
+++ b/deps/include/cppgc/internal/member-storage.h
@@ -0,0 +1,236 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
+#define INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
+
+#include <atomic>
+#include <cstddef>
+#include <type_traits>
+
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/logging.h"
+#include "cppgc/sentinel-pointer.h"
+#include "v8config.h"  // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+#if defined(CPPGC_POINTER_COMPRESSION)
+
+#if defined(__clang__)
+// Attribute const allows the compiler to assume that CageBaseGlobal::g_base_
+// doesn't change (e.g. across calls) and thereby avoid redundant loads.
+#define CPPGC_CONST __attribute__((const))
+#define CPPGC_REQUIRE_CONSTANT_INIT \
+  __attribute__((require_constant_initialization))
+#else  // defined(__clang__)
+#define CPPGC_CONST
+#define CPPGC_REQUIRE_CONSTANT_INIT
+#endif  // defined(__clang__)
+
+class CageBaseGlobal final {
+ public:
+  V8_INLINE CPPGC_CONST static uintptr_t Get() {
+    CPPGC_DCHECK(IsBaseConsistent());
+    return g_base_;
+  }
+
+  V8_INLINE CPPGC_CONST static bool IsSet() {
+    CPPGC_DCHECK(IsBaseConsistent());
+    return (g_base_ & ~kLowerHalfWordMask) != 0;
+  }
+
+ private:
+  // We keep the lower halfword as ones to speed up decompression.
+  static constexpr uintptr_t kLowerHalfWordMask =
+      (api_constants::kCagedHeapReservationAlignment - 1);
+
+  static V8_EXPORT uintptr_t g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
+
+  CageBaseGlobal() = delete;
+
+  V8_INLINE static bool IsBaseConsistent() {
+    return kLowerHalfWordMask == (g_base_ & kLowerHalfWordMask);
+  }
+
+  friend class CageBaseGlobalUpdater;
+};
+
+#undef CPPGC_REQUIRE_CONSTANT_INIT
+#undef CPPGC_CONST
+
+class V8_TRIVIAL_ABI CompressedPointer final {
+ public:
+  using IntegralType = uint32_t;
+
+  V8_INLINE CompressedPointer() : value_(0u) {}
+  V8_INLINE explicit CompressedPointer(const void* ptr)
+      : value_(Compress(ptr)) {}
+  V8_INLINE explicit CompressedPointer(std::nullptr_t) : value_(0u) {}
+  V8_INLINE explicit CompressedPointer(SentinelPointer)
+      : value_(kCompressedSentinel) {}
+
+  V8_INLINE const void* Load() const { return Decompress(value_); }
+  V8_INLINE const void* LoadAtomic() const {
+    return Decompress(
+        reinterpret_cast<const std::atomic<IntegralType>&>(value_).load(
+            std::memory_order_relaxed));
+  }
+
+  V8_INLINE void Store(const void* ptr) { value_ = Compress(ptr); }
+  V8_INLINE void StoreAtomic(const void* value) {
+    reinterpret_cast<std::atomic<IntegralType>&>(value_).store(
+        Compress(value), std::memory_order_relaxed);
+  }
+
+  V8_INLINE void Clear() { value_ = 0u; }
+  V8_INLINE bool IsCleared() const { return !value_; }
+
+  V8_INLINE bool IsSentinel() const { return value_ == kCompressedSentinel; }
+
+  V8_INLINE uint32_t GetAsInteger() const { return value_; }
+
+  V8_INLINE friend bool operator==(CompressedPointer a, CompressedPointer b) {
+    return a.value_ == b.value_;
+  }
+  V8_INLINE friend bool operator!=(CompressedPointer a, CompressedPointer b) {
+    return a.value_ != b.value_;
+  }
+  V8_INLINE friend bool operator<(CompressedPointer a, CompressedPointer b) {
+    return a.value_ < b.value_;
+  }
+  V8_INLINE friend bool operator<=(CompressedPointer a, CompressedPointer b) {
+    return a.value_ <= b.value_;
+  }
+  V8_INLINE friend bool operator>(CompressedPointer a, CompressedPointer b) {
+    return a.value_ > b.value_;
+  }
+  V8_INLINE friend bool operator>=(CompressedPointer a, CompressedPointer b) {
+    return a.value_ >= b.value_;
+  }
+
+  static V8_INLINE IntegralType Compress(const void* ptr) {
+    static_assert(
+        SentinelPointer::kSentinelValue == 0b10,
+        "The compression scheme relies on the sentinel encoded as 0b10");
+    static constexpr size_t kGigaCageMask =
+        ~(api_constants::kCagedHeapReservationAlignment - 1);
+
+    CPPGC_DCHECK(CageBaseGlobal::IsSet());
+    const uintptr_t base = CageBaseGlobal::Get();
+    CPPGC_DCHECK(!ptr || ptr == kSentinelPointer ||
+                 (base & kGigaCageMask) ==
+                     (reinterpret_cast<uintptr_t>(ptr) & kGigaCageMask));
+
+#if defined(CPPGC_2GB_CAGE)
+    // Truncate the pointer.
+    auto compressed =
+        static_cast<IntegralType>(reinterpret_cast<uintptr_t>(ptr));
+#else   // !defined(CPPGC_2GB_CAGE)
+    const auto uptr = reinterpret_cast<uintptr_t>(ptr);
+    // Shift the pointer by one and truncate.
+    auto compressed = static_cast<IntegralType>(uptr >> 1);
+#endif  // !defined(CPPGC_2GB_CAGE)
+    // Normal compressed pointers must have the MSB set.
+    CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) ||
+                 (compressed & (1 << 31)));
+    return compressed;
+  }
+
+  static V8_INLINE void* Decompress(IntegralType ptr) {
+    CPPGC_DCHECK(CageBaseGlobal::IsSet());
+    const uintptr_t base = CageBaseGlobal::Get();
+    // Treat compressed pointer as signed and cast it to uint64_t, which will
+    // sign-extend it.
+#if defined(CPPGC_2GB_CAGE)
+    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr));
+#else   // !defined(CPPGC_2GB_CAGE)
+    // Then, shift the result by one. It's important to shift the unsigned
+    // value, as otherwise it would result in undefined behavior.
+    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr)) << 1;
+#endif  // !defined(CPPGC_2GB_CAGE)
+    return reinterpret_cast<void*>(mask & base);
+  }
+
+ private:
+#if defined(CPPGC_2GB_CAGE)
+  static constexpr IntegralType kCompressedSentinel =
+      SentinelPointer::kSentinelValue;
+#else   // !defined(CPPGC_2GB_CAGE)
+  static constexpr IntegralType kCompressedSentinel =
+      SentinelPointer::kSentinelValue >> 1;
+#endif  // !defined(CPPGC_2GB_CAGE)
+  // All constructors initialize `value_`. Do not add a default value here as it
+  // results in a non-atomic write on some builds, even when the atomic version
+  // of the constructor is used.
+  IntegralType value_;
+};
+
+#endif  // defined(CPPGC_POINTER_COMPRESSION)
+
+class V8_TRIVIAL_ABI RawPointer final {
+ public:
+  using IntegralType = uintptr_t;
+
+  V8_INLINE RawPointer() : ptr_(nullptr) {}
+  V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
+
+  V8_INLINE const void* Load() const { return ptr_; }
+  V8_INLINE const void* LoadAtomic() const {
+    return reinterpret_cast<const std::atomic<const void*>&>(ptr_).load(
+        std::memory_order_relaxed);
+  }
+
+  V8_INLINE void Store(const void* ptr) { ptr_ = ptr; }
+  V8_INLINE void StoreAtomic(const void* ptr) {
+    reinterpret_cast<std::atomic<const void*>&>(ptr_).store(
+        ptr, std::memory_order_relaxed);
+  }
+
+  V8_INLINE void Clear() { ptr_ = nullptr; }
+  V8_INLINE bool IsCleared() const { return !ptr_; }
+
+  V8_INLINE bool IsSentinel() const { return ptr_ == kSentinelPointer; }
+
+  V8_INLINE uintptr_t GetAsInteger() const {
+    return reinterpret_cast<uintptr_t>(ptr_);
+  }
+
+  V8_INLINE friend bool operator==(RawPointer a, RawPointer b) {
+    return a.ptr_ == b.ptr_;
+  }
+  V8_INLINE friend bool operator!=(RawPointer a, RawPointer b) {
+    return a.ptr_ != b.ptr_;
+  }
+  V8_INLINE friend bool operator<(RawPointer a, RawPointer b) {
+    return a.ptr_ < b.ptr_;
+  }
+  V8_INLINE friend bool operator<=(RawPointer a, RawPointer b) {
+    return a.ptr_ <= b.ptr_;
+  }
+  V8_INLINE friend bool operator>(RawPointer a, RawPointer b) {
+    return a.ptr_ > b.ptr_;
+  }
+  V8_INLINE friend bool operator>=(RawPointer a, RawPointer b) {
+    return a.ptr_ >= b.ptr_;
+  }
+
+ private:
+  // All constructors initialize `ptr_`. Do not add a default value here as it
+  // results in a non-atomic write on some builds, even when the atomic version
+  // of the constructor is used.
+  const void* ptr_;
+};
+
+#if defined(CPPGC_POINTER_COMPRESSION)
+using MemberStorage = CompressedPointer;
+#else   // !defined(CPPGC_POINTER_COMPRESSION)
+using MemberStorage = RawPointer;
+#endif  // !defined(CPPGC_POINTER_COMPRESSION)
+
+}  // namespace internal
+}  // namespace cppgc
+
+#endif  // INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
diff --git a/deps/include/cppgc/internal/name-trait.h b/deps/include/cppgc/internal/name-trait.h
new file mode 100755
index 0000000..1d927a9
--- /dev/null
+++ b/deps/include/cppgc/internal/name-trait.h
@@ -0,0 +1,137 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
+#define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "cppgc/name-provider.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+#if CPPGC_SUPPORTS_OBJECT_NAMES && defined(__clang__)
+#define CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME 1
+
+// Provides constexpr c-string storage for a name of fixed |Size| characters.
+// Automatically appends terminating 0 byte.
+template <size_t Size>
+struct NameBuffer {
+  char name[Size + 1]{};
+
+  static constexpr NameBuffer FromCString(const char* str) {
+    NameBuffer result;
+    for (size_t i = 0; i < Size; ++i) result.name[i] = str[i];
+    result.name[Size] = 0;
+    return result;
+  }
+};
+
+template
+const char* GetTypename() {
+ static constexpr char kSelfPrefix[] =
+ "const char *cppgc::internal::GetTypename() [T =";
+ static_assert(__builtin_strncmp(__PRETTY_FUNCTION__, kSelfPrefix,
+ sizeof(kSelfPrefix) - 1) == 0,
+ "The prefix must match");
+ static constexpr const char* kTypenameStart =
+ __PRETTY_FUNCTION__ + sizeof(kSelfPrefix);
+ static constexpr size_t kTypenameSize =
+ __builtin_strlen(__PRETTY_FUNCTION__) - sizeof(kSelfPrefix) - 1;
+ // NameBuffer is an indirection that is needed to make sure that only a
+ // substring of __PRETTY_FUNCTION__ gets materialized in the binary.
+ static constexpr auto buffer =
+ NameBuffer